pax_global_header00006660000000000000000000000064135410361750014517gustar00rootroot0000000000000052 comment=195ac07c1bf9c955a12a9a54523ef5ced9e64e23 hypothesis-hypothesis-python-4.36.2/000077500000000000000000000000001354103617500175265ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/.flake8000066400000000000000000000006421354103617500207030ustar00rootroot00000000000000[flake8] exclude = compat.py, hypothesis-python/src/hypothesis/vendor/*, test_reflection.py, test_imports.py, hypothesis-python/tests/py2/*, test_lambda_formatting.py ignore = F811,D1,D205,D209,D213,D400,D401,D412,D413,D999,D202,E203,E501,W503 # Use flake8-alfred to forbid builtins that require compatibility wrappers. warn-symbols= bytes=Instead of bytes(), use hbytes() or binary_type hypothesis-hypothesis-python-4.36.2/.gitattributes000066400000000000000000000001761354103617500224250ustar00rootroot00000000000000* text eol=lf # Denote all files that are truly binary and should not be modified. *.png binary *.jpg binary *.gif binary hypothesis-hypothesis-python-4.36.2/.gitignore000066400000000000000000000005331354103617500215170ustar00rootroot00000000000000# misc (editors, file systems, etc) *.swo *.swp .idea .vagrant .DS_Store .hypothesis .vscode/ # generic build components .runtimes # python *.pyc *.pyo venv* .cache .pytest_cache .mypy_cache docs/_build *.egg-info _build .tox .coverage .pypirc htmlcov build dist .doctrees/ # encrypted files secrets.tar secrets # Rust build targets target hypothesis-hypothesis-python-4.36.2/.isort.cfg000066400000000000000000000006641354103617500214330ustar00rootroot00000000000000[settings] known_third_party = attr, click, dateutil, django, dpcontracts, flaky, lark, numpy, pandas, pytz, pytest, pyup, requests, scipy, unicodenazi, yaml known_first_party = hypothesis, tests add_imports = from __future__ import absolute_import, from __future__ import print_function, from __future__ import division multi_line_output = 3 include_trailing_comma = True 
force_grid_wrap = 0 combine_as_imports = True line_length = 88 hypothesis-hypothesis-python-4.36.2/.pyup.yml000066400000000000000000000003751354103617500213310ustar00rootroot00000000000000requirements: - requirements/tools.txt: updates: all pin: True - requirements/test.txt: updates: all pin: True - requirements/coverage.txt: updates: all pin: True schedule: "every week on monday" search: False hypothesis-hypothesis-python-4.36.2/.readthedocs.yml000066400000000000000000000000421354103617500226100ustar00rootroot00000000000000requirements_file: .rtfd-reqs.txt hypothesis-hypothesis-python-4.36.2/.rtfd-reqs.txt000066400000000000000000000000301354103617500222450ustar00rootroot00000000000000hypothesis-python/[all] hypothesis-hypothesis-python-4.36.2/.travis.yml000066400000000000000000000023631354103617500216430ustar00rootroot00000000000000language: c sudo: false env: PYTHONDONTWRITEBYTECODE=x os: - linux branches: only: - "master" cache: apt: true directories: - $HOME/.cargo - $HOME/.rustup - $HOME/.gem - $HOME/.cache/pip - $HOME/wheelhouse - $HOME/.stack - $HOME/.local - vendor/bundle env: global: - BUILD_RUNTIMES=$HOME/.runtimes - FORMAT_ALL=true jobs: include: # Prechecks that we want to run first. 
- stage: precheck env: TASK=check-whole-repo-tests - env: TASK=lint - env: TASK=lint-ruby - env: TASK=check-format - env: TASK=check-rust-tests - stage: main env: TASK=check-coverage - env: TASK=check-py27 - env: TASK=check-py36 sudo: required dist: xenial - env: TASK=check-ruby-tests - env: TASK=check-django111 - env: TASK=check-pandas24 - stage: deploy env: TASK=deploy script: - ./build.sh matrix: fast_finish: true stages: - precheck - main - extras - name: deploy if: type = push notifications: email: recipients: - david@drmaciver.com on_success: never on_failure: change addons: apt: packages: - libgmp-dev - shellcheck hypothesis-hypothesis-python-4.36.2/CITATION000066400000000000000000000012421354103617500206620ustar00rootroot00000000000000Please use one of the following samples to cite the hypothesis version (change x.y to the version you are using) from this installation. You may wish to include the DOI, https://doi.org/10.5281/zenodo.1412597 Text: [Hypothesis] Hypothesis x.y, 2018 David R. MacIver, https://github.com/HypothesisWorks/hypothesis BibTeX: @misc{Hypothesisx.y, title = {{H}ypothesis x.y}, author = {David R. MacIver}, year = {2018}, howpublished = {\href{https://github.com/HypothesisWorks/hypothesis}{\texttt{https://github.com/HypothesisWorks/hypothesis}}}, } If you are unsure about which version of Hypothesis you are using run: `pip show hypothesis` for the Python version. 
hypothesis-hypothesis-python-4.36.2/CODEOWNERS000066400000000000000000000003661354103617500211260ustar00rootroot00000000000000# Engine changes need to be approved by DRMacIver, as per # https://github.com/HypothesisWorks/hypothesis/blob/master/guides/review.rst#engine-changes /conjecture-rust/ @DRMacIver /hypothesis-python/src/hypothesis/internal/conjecture/ @DRMacIver hypothesis-hypothesis-python-4.36.2/CODE_OF_CONDUCT.rst000066400000000000000000000051651354103617500225440ustar00rootroot00000000000000--------------- Code of conduct --------------- Hypothesis's community is an inclusive space, and everyone in it is expected to abide by a code of conduct. This applies in issues, pull requests, etc. as well as in the various Hypothesis community spaces. At the high level the code of conduct goes like this: 1. Be kind 2. Be respectful 3. Be helpful While it is impossible to enumerate everything that is unkind, disrespectful or unhelpful, here are some specific things that are definitely against the code of conduct: 1. -isms and -phobias (e.g. racism, sexism, transphobia and homophobia) are unkind, disrespectful *and* unhelpful. Just don't. 2. All software is broken. This is not a moral failing on the part of the authors. Don't give people a hard time for bad code. 3. It's OK not to know things. Everybody was a beginner once, nobody should be made to feel bad for it. 4. It's OK not to *want* to know something. If you think someone's question is fundamentally flawed, you should still ask permission before explaining what they should actually be asking. 5. Note that "I was just joking" is not a valid defence. 6. Don't suggest violence as a response to things, e.g. "People who do/think X should be Y-ed". Even if you think it is obvious hyperbole and that it's very clear that no actual threat is meant, it still contributes to a culture that makes people feel unsafe. ~~~~~~~~~~~~~~~~~~~~~~~~ Resolution of Violations ~~~~~~~~~~~~~~~~~~~~~~~~ David R. 
MacIver (the project lead) acts as the main point of contact and enforcer for code of conduct violations. You can email him at david@drmaciver.com, message him as DRMacIver on irc.freenode.net, or for violations on GitHub that you want to draw his attention to you can also mention him as @DRMacIver. Other people (especially Hypothesis team members) should feel free to call people on code of conduct violations when they see them, and it is appreciated but not required (especially if doing so would make you feel uncomfortable or unsafe). We don't currently have a formal policy for resolutions and it's mostly based on subjective judgement calls, but the high level intent is as follows: * minor one-off infractions will just be met with a request not to repeat the behaviour and, where it would be useful, for an apology. * Major infractions and repeat offenders will be banned from the community. If you disagree with David's judgement on any particular event, please feel free to tell him so. Also, people who have a track record of bad behaviour outside of the Hypothesis community may be banned even if they obey all these rules if their presence is making people uncomfortable. hypothesis-hypothesis-python-4.36.2/CONTRIBUTING.rst000066400000000000000000000345641354103617500222030ustar00rootroot00000000000000============= Contributing ============= First off: It's great that you want to contribute to Hypothesis! Thanks! ------------------ Ways to Contribute ------------------ Hypothesis is a mature yet active project. This means that there are many ways in which you can contribute. 
For example, it's super useful and highly appreciated if you do any of: * Submit bug reports * Submit feature requests * Write about Hypothesis * Give a talk about Hypothesis * Build libraries and tools on top of Hypothesis outside the main repo * Submit PRs If you build a Hypothesis strategy that you would like to be more widely known please add it to the list of external strategies by preparing a PR against the docs/strategies.rst file. If you find an error in the documentation, please feel free to submit a PR that fixes the error. Spot a tyop? Fix it up and send us a PR! You can read more about how we document Hypothesis in ``guides/documentation.rst`` The process for submitting source code PRs is generally more involved (don't worry, we'll help you through it), so do read the rest of this document. If you're planning a larger change, the contributor guides (in the ``guides/`` directory) will make sure you're on the right track. ---------------------------------- Installing from source and testing ---------------------------------- If you want to install directly from the source code (e.g. because you want to make changes and install the changed version) you can do this with: .. code:: bash pip install -r requirements/test.txt pip install -r requirements/tools.txt pip install -e hypothesis-python/ # You don't need to run the tests, but here's the command: pytest hypothesis-python/tests/cover/ You may wish to do all of this in a `virtualenv `_. For example: .. code:: bash virtualenv venv source venv/bin/activate pip install hypothesis Will create an isolated environment where you can install and try out Hypothesis without affecting your system packages. ----------------------- Copyright and Licensing ----------------------- It's important to make sure that you own the rights to the work you are submitting. If it is done on work time, or you have a particularly onerous contract, make sure you've checked with your employer. 
All work in Hypothesis is licensed under the terms of the `Mozilla Public License, version 2.0 `_. By submitting a contribution you are agreeing to licence your work under those terms. Finally, if it is not there already, add your name (and a link to your GitHub and email address if you want) to the list of contributors found at the end of this document, in alphabetical order. It doesn't have to be your "real" name (whatever that means), any sort of public identifier is fine. In particular a GitHub account is sufficient. ----------------------- The actual contribution ----------------------- OK, so you want to make a contribution and have sorted out the legalese. What now? First off: If you're planning on implementing a new feature, talk to us first! Come `join us on IRC `_, or open an issue. If it's really small feel free to open a work in progress pull request sketching out the idea, but it's best to get feedback from the Hypothesis maintainers before sinking a bunch of work into it. If you're working on an existing issue, leave a comment so we can try to avoid duplicating your work before you open a pull request. In general work-in-progress pull requests are totally welcome if you want early feedback or help with some of the tricky details. Don't be afraid to ask for help. In order to get merged, a pull request will have to have a green build (naturally) and to be approved by a Hypothesis maintainer (and, depending on what it is, possibly specifically by DRMacIver). The review process is the same one that all changes to Hypothesis go through, regardless of whether you're an established maintainer or entirely new to the project. It's very much intended to be a collaborative one: It's not us telling you what we think is wrong with your code, it's us working with you to produce something better together. We have `a lengthy check list `_ of things we look for in a review. Feel free to have a read of it in advance and go through it yourself if you'd like to. 
It's not required, but it might speed up the process. Once your pull request has a green build and has passed review, it will be merged to master fairly promptly. This will immediately trigger a release! Don't be scared. If that breaks things, that's our fault not yours - the whole point of this process is to ensure that problems get caught before we merge rather than after. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull request or external package? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ New strategies can be added to Hypothesis, or published as an external package on PyPI - either is fine for most strategies. If in doubt, ask! It's generally much easier to get things working outside, because there's more freedom to experiment and fewer requirements in stability and API style. We're happy to review and help with external packages as well as pull requests; several parts of Hypothesis started life outside and were integrated later (with permission, of course). For clarity, we suggest naming your package in the pattern of ``hypothesis-regex`` and ``hypothesis-protobuf`` on PyPI. On the other hand, being inside gets you access to some deeper implementation features (if you need them) and better long-term guarantees about maintenance. We particularly encourage pull requests for new composable primitives that make implementing other strategies easier, or for widely used types in the Python standard library. Strategies for other things are also welcome; anything with external dependencies just goes in ``hypothesis.extra``. ~~~~~~~~~ The build ~~~~~~~~~ The build is driven by a ``build.sh`` shell script, which delegates to a custom Python-based build system. Actually running the tests is managed by `tox `_, but the build system will call out to the relevant tox environments so you mostly don't have to know anything about that unless you want to make changes to the test config. 
You also mostly don't need to know anything about the build system except to type ``./build.sh`` followed by the name of the task you want to run. All of it will be checked on CI so you don't *have* to run anything locally, but you might find it useful to do so: A full Travis run takes about twenty minutes, and there's often a queue, so running a smaller set of tests locally can be helpful. The build system should be "fairly" portable, but is currently only known to work on Linux or OS X. It *might* work on a BSD or on Windows with cygwin installed, but it hasn't been tried. If you try it and find it doesn't work, please do submit patches to fix that. Some notable commands: ``./build.sh check-coverage`` will verify 100% code coverage by running a curated subset of the test suite. ``./build.sh check-py36`` (etc.) will run most of the test suite against a particular python version. ``./build.sh format`` will reformat your code according to the Hypothesis coding style. You should use this before each commit ideally, but you only really have to use it when you want your code to be ready to merge. You can also use ``./build.sh check-format``, which will run format and some linting and will then error if you have a git diff. Note: This will error even if you started with a git diff, so if you've got any uncommitted changes this will necessarily report an error. Look in ``.travis.yml`` for a short list of other supported build tasks. Note: The build requires a lot of different versions of python, so rather than have you install them yourself, the build system will install them itself in a local directory. This means that the first time you run a task you may have to wait a while as the build downloads and installs the right version of python for you. -------------------- List of Contributors -------------------- The primary author for most of Hypothesis is David R. MacIver (me). However the following people have also contributed work. 
As well as my thanks, they also have copyright over their individual contributions. * `Adam Johnson `_ * `Adam Sven Johnson `_ * `Alex Gaynor `_ * `Alex Stapleton `_ * `Alex Willmer `_ (alex@moreati.org.uk) * `Ben Peterson `_ (killthrush@hotmail.com) * `Benjamin Lee `_ (benjamindlee@me.com) * `Bex Dunn `_ (bex.dunn@gmail.com) * `Bill Tucker `_ (imbilltucker@gmail.com) * `Buck Evan, copyright Google LLC `_ * `Cameron McGill `_ * `Charles O'Farrell `_ * `Charlie Tanksley `_ * `Chase Garner `_ (chase@garner.red) * `Chris Down `_ * `Christopher Martin `_ (ch.martin@gmail.com) * `Conrad Ho `_ (conrad.alwin.ho@gmail.com) * `Cory Benfield `_ * `Cristi Cobzarenco `_ (cristi@reinfer.io) * `Damon Francisco `_ (damontfrancisco@yahoo.com) * `Daniel J. West `_ * `David Bonner `_ (dbonner@gmail.com) * `David Chudzicki `_ (dchudz@gmail.com) * `Derek Gustafson `_ * `Dion Misic `_ (dion.misic@gmail.com) * `Eduardo Enriquez `_ (eduardo.a.enriquez@gmail.com) * `El Awbery `_ * `Emmanuel Leblond `_ * `Felix Grünewald `_ * `Florian Bruhin `_ * `follower `_ * `Gary Donovan `_ * `Graham Williamson `_ * `Grant David Bachman `_ (grantbachman@gmail.com) * `Gregory Petrosyan `_ * `Grigorios Giannakopoulos `_ * `Jack Massey `_ * `Jakub Nabaglo `_ (j@nab.gl) * `Jenny Rouleau `_ * `Jeremy Thurgood `_ * `J.J. 
Green `_ * `JP Viljoen `_ (froztbyte@froztbyte.net) * `Jochen Müller `_ * `Joey Tuong `_ * `Jonathan Gayvallet `_ (jonathan.gayvallet@orange.com) * `Jonty Wareing `_ (jonty@jonty.co.uk) * `Joshua Boone `_ (joshuaboone4190@gmail.com) * `jmhsi `_ * `jwg4 `_ * `Kai Chen `_ (kaichen120@gmail.com) * `Karthikeyan Singaravelan `_ (tir.karthi@gmail.com) * `Katrina Durance `_ * `kbara `_ * `Kristian Glass `_ * `Kyle Reeve `_ (krzw92@gmail.com) * `Lee Begg `_ * `Lisa Goeller `_ * `Louis Taylor `_ * `Luke Barone-Adesi `_ * `Lundy Bernard `_ * `Marco Sirabella `_ * `marekventur `_ * `Marius Gedminas `_ (marius@gedmin.as) * `Markus Unterwaditzer `_ (markus@unterwaditzer.net) * `Mathieu Paturel `_ (mathieu.paturel@gmail.com) * `Matt Bachmann `_ (bachmann.matt@gmail.com) * `Max Nordlund `_ (max.nordlund@gmail.com) * `Maxim Kulkin `_ (maxim.kulkin@gmail.com) * `mulkieran `_ * `Nicholas Chammas `_ * `Paul Ganssle `_ (paul@ganssle.io) * `Paul Lorett Amazona `_ * `Paul Stiverson `_ * `Peadar Coyle `_ (peadarcoyle@gmail.com) * `Pierre-Jean Campigotto `_ * `Richard Boulton `_ (richard@tartarus.org) * `Ryan Soklaski `_ (rsoklaski@gmail.com) * `Ryan Turner `_ (ryan.turner@uber.com) * `Sam Bishop (TechDragon) `_ (sam@techdragon.io) * `Sam Hames `_ * `Sanyam Khurana `_ * `Saul Shanabrook `_ (s.shanabrook@gmail.com) * `Stuart Cook `_ * `SuperStormer `_ * `Sushobhit `_ (sushobhitsolanki@gmail.com) * `Tariq Khokhar `_ (tariq@khokhar.net) * `Tessa Bradbury `_ * `Thomas Grainge `_ * `Tim Martin `_ (tim@asymptotic.co.uk) * `Thomas Kluyver `_ (thomas@kluyver.me.uk) * `Tom McDermott `_ (sponster@gmail.com) * `Tyler Gibbons `_ (tyler.gibbons@flexport.com) * `Tyler Nickerson `_ * `Vidya Rani `_ (vidyarani.d.g@gmail.com) * `Will Hall `_ (wrsh07@gmail.com) * `Will Thompson `_ (will@willthompson.co.uk) * `Wilfred Hughes `_ * `Zac Hatfield-Dodds `_ (zac.hatfield.dodds@gmail.com) * `Zebulun Arendsee `_ (zbwrnz@gmail.com) 
hypothesis-hypothesis-python-4.36.2/LICENSE.txt000066400000000000000000000006361354103617500213560ustar00rootroot00000000000000Copyright (c) 2013, David R. MacIver All code in this repository except where explicitly noted otherwise is released under the Mozilla Public License v 2.0. You can obtain a copy at https://mozilla.org/MPL/2.0/. Some code in this repository comes from other projects. Where applicable, the original copyright and license are noted and any modifications made are released dual licensed with the original license. hypothesis-hypothesis-python-4.36.2/Makefile000066400000000000000000000003621354103617500211670ustar00rootroot00000000000000# You don't need to use this Makefile and should use build.sh instead. This is # just here so that us poor souls who remember the Make based system and keep # typing "make target" can ease our transition to the new system. %: ./build.sh $@ hypothesis-hypothesis-python-4.36.2/README.rst000066400000000000000000000032141354103617500212150ustar00rootroot00000000000000========== Hypothesis ========== Hypothesis is family of testing libraries which let you write tests parametrized by a source of examples. A Hypothesis implementation then generates simple and comprehensible examples that make your tests fail. This simplifies writing your tests and makes them more powerful at the same time, by letting software automate the boring bits and do them to a higher standard than a human would, freeing you to focus on the higher level test logic. This sort of testing is often called "property-based testing", and the most widely known implementation of the concept is the Haskell library `QuickCheck `_, but Hypothesis differs significantly from QuickCheck and is designed to fit idiomatically and easily into existing styles of testing that you are used to, with absolutely no familiarity with Haskell or functional programming needed. 
The currently available implementations of Hypothesis are: * `Hypothesis for Python `_ is the original implementation, and the only one that is currently fully production ready. * `Hypothesis for Ruby `_ is an ongoing project that we intend to eventually reach parity with Hypothesis for Python. * `Hypothesis for Java `_ is a prototype written some time ago. It's far from feature complete and is not under active development, but was intended to prove the viability of the concept. This repository will eventually house all implementations of Hypothesis, but we are currently in the process of consolidating the existing repositories into a single one. hypothesis-hypothesis-python-4.36.2/azure-pipelines.yml000066400000000000000000000074201354103617500233700ustar00rootroot00000000000000# Schema docs at https://aka.ms/yaml trigger: - master jobs: - job: linux pool: vmImage: 'Ubuntu 16.04' strategy: matrix: check-whole-repo-tests: TASK: check-whole-repo-tests lint: TASK: lint lint-ruby: TASK: lint-ruby check-format: TASK: check-format check-rust-tests: TASK: check-rust-tests check-coverage: TASK: check-coverage check-pypy: TASK: check-pypy check-pypy3: TASK: check-pypy3 check-py36: TASK: check-py36 check-py27: TASK: check-py27 check-py35: TASK: check-py35 check-py37: TASK: check-py37 check-quality: TASK: check-quality check-ruby-tests: TASK: check-ruby-tests check-unicode: TASK: check-unicode check-py27-typing: TASK: check-py27-typing check-nose: TASK: check-nose check-pytest30: TASK: check-pytest30 check-django22: TASK: check-django22 check-django21: TASK: check-django21 check-django20: TASK: check-django20 check-django111: TASK: check-django111 check-pandas19: TASK: check-pandas19 check-pandas22: TASK: check-pandas22 check-pandas23: TASK: check-pandas23 check-pandas24: TASK: check-pandas24 check-pandas25: TASK: check-pandas25 steps: - task: UsePythonVersion@0 inputs: versionSpec: '3.6' - script: sudo apt-get update && sudo apt-get install libreadline-dev libsqlite3-dev 
shellcheck displayName: Install apt dependencies - script: ./build.sh check-installed displayName: Install Python - script: ./build.sh displayName: Run tests - job: windows pool: vmImage: 'windows-2019' strategy: matrix: check-py36-x64: python.version: '3.6' python.architecture: 'x64' check-py36-x86: python.version: '3.6' python.architecture: 'x86' steps: - task: UsePythonVersion@0 inputs: versionSpec: '$(python.version)' architecture: '$(python.architecture)' - script: | pip install --upgrade setuptools pip wheel pip install setuptools -r requirements/test.txt pip install hypothesis-python/[all] displayName: Install dependencies - script: | cd hypothesis-python pytest displayName: Run tests - job: windows_py2 pool: vmImage: 'windows-2019' strategy: matrix: check-py27-x64: python.version: '2.7' python.architecture: 'x64' check-py27-x86: python.version: '2.7' python.architecture: 'x86' steps: - task: UsePythonVersion@0 inputs: versionSpec: '$(python.version)' architecture: '$(python.architecture)' - script: | pip install --upgrade setuptools pip wheel pip install setuptools -r requirements/py2.txt pip install hypothesis-python/[all] displayName: Install dependencies - script: | cd hypothesis-python pytest displayName: Run tests - job: osx pool: vmImage: 'macOS-10.13' strategy: matrix: check-py27: TASK: check-py27 check-py36: TASK: check-py36 steps: - task: UsePythonVersion@0 inputs: versionSpec: '3.6' - script: | brew update brew install readline xz ncurses ./build.sh install-core displayName: Install dependencies - script: ./build.sh displayName: Run tests # TODO: Deploy jobs dependent on above hypothesis-hypothesis-python-4.36.2/brand/000077500000000000000000000000001354103617500206145ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/brand/README.rst000066400000000000000000000037431354103617500223120ustar00rootroot00000000000000Logos and other pretty things ============================= Hypothesis has a beautiful logo, thanks to the generous work of 
Libby Berrie in `issue #1519 `__. General guidelines: - Prefer vector (``.svg``) formats to raster formats (``.png``) wherever possible. - We consider the rainbow version to be canonical. The blue variant is provided for cases such as monochome versions or printing with a limited palette. With that in mind, you are welcome to use these logos to refer to Hypothesis - and if you're not sure whether a specific use is OK, please get in touch and ask! For example, we often bring Hypothesis stickers to conferences but can't make it to everything. If you want to print your own Hypothesis stickers, upload the image to `StickerMule `__ and pick one of the vinyl options - that's how we get ours! Colour palette in GIMP format ############################# A `colour palette in GIMP format `__ (``.gpl``) is also provided with the intent of making it easier to produce graphics and documents which re-use the colours in the Hypothesis Dragonfly logo by Libby Berrie. The ``hypothesis.gpl`` file should be copied or imported to the appropriate location on your filesystem. For example: - ``/usr/share/inkscape/palettes/`` for Inkscape on Ubuntu 18.08 - Edit -> Colors -> Import... then select the ``hypothesis.gpl`` file in Scribus on Ubuntu 18.08 - Windows -> Dockable Dialogs -> Palettes -> Palettes Menu -> Add Palette -> Import from file... then select the ``hypothesis.gpl`` file in GIMP on Ubuntu 18.08 Once imported, the colour palette is then available for easy manipulation of colours within the user interface. Inkscape: .. image:: inkscape.png :width: 800px :align: left :alt: Inkscape showing Hypothesis colour palette GIMP: .. image:: gimp.png :width: 800px :align: left :alt: GIMP showing Hypothesis colour palette hypothesis-hypothesis-python-4.36.2/brand/dragonfly-blue.svg000066400000000000000000000440251354103617500242540ustar00rootroot00000000000000 dragonfly-original Created with Sketch. 
hypothesis-hypothesis-python-4.36.2/brand/dragonfly-rainbow.svg000066400000000000000000000514251354103617500247700ustar00rootroot00000000000000 dragonfly-rainbow Created with Sketch. hypothesis-hypothesis-python-4.36.2/brand/favicon.ico000066400000000000000000000124661354103617500227460ustar00rootroot00000000000000 h&  (  bEHeGK`CE`CE,_BD?cFF6̸mx@z_| {u`EFX69_BDR_BD_BD^ADv[Mr{~Ղn؅_CE`CE _BD_BD_BD]@CkRyԁ׆ىސۋ`CE^BC_BDu_BD_BD_BD^@C~[׆܍ŅsWOuYP`BE`CE_BD_BD_BDcCFdEFgւىƮ|eHHPgKIcDH_BD_BDW_BD_BDdDFiEHvZMԿtւڊxaDEXdHG`BE`BG`BEaBEfDGhEHgGG_yԁىߑnZ=BS`CE`CEM5HaCExgGEhHGrVIXmv}օي|`V9@C_BDmPDN6M}:xLǴafkry}{_QU8@#_CEhX(ֹMh^dgmswddGGR3@`CEgf e?IԸR#bufʷdṤcס\rWLvX:B]@Dmm(lSme}t0įjAU7?43  ܒtuYtat|| {]||{|u|wzzzDzz;z |ӀӀ(Ӏԁԁ{{zzz{V{ {ԅӁ/ӁӁ|{{{{{?( @ uuwxxcEIeEK`DF`CF#`CE>`CEQ`BE\_BE`^AD9ůj uvAxny{|EمԀaEHcIN_CE/_BE_BD_BD_BD_BD_BD^ADv\Louwz|}ւ&Ձ_CE`CE4_BD_BD_BD_BD_BD_BD_BD]@CvVtxz}ԁՃׅNք`DF`FG_BD_BD_BD_BD_BD_BD_BD_BD]@CZw{}ԁՃօ؇ىaو}y_BD`CED_BD_BD_BD_BD_BD_BD_BD_BD_AD_z}ԁք׆ىڋ܍l܍`DEaEF_BD_BD_BD_BD_BD_BD_BD_BD_BDdGFg|ӀՃ׆ىی܎ȆcEG`BE`CE9_BD_BD_BD_BD_BD_BD`BDaCE_ADlPI͸q~ԁׅىیݐޒlW9@C_CEDDbCFgEH_BE_BD_BD_BD_BD_BD_BDcCFdDF^AD~dQwՃ؇ڋ܎ߒߕg[=BS6=eGG`CE`CE%_BD_BD_BD_BD_BD_BDaCEiFHeDG^AD^|Ԁք؈یݐߗe[>BfJKbEF`IJ^<>_CEm_BD_BD_BD_BD_BD`BDiFHnHJcCFiMHĮl|Ӏք؈یݐݖya\?BbGFaDE_CE`DF_BD_BD_BD_BD_BD`CEiFHqILkFI_ADtVv|Ӏք؈یݐڔn\\?CcEGaDE`BE`CEA_BD_BD_BD_BDcCElGJqIKjFH`ADlPI­ix{Ӏք؈یݏԐ|`T]@CcGIaDF`BE`BEk`BEbCEcCFhEHnHJmGJdDF^ADdGE\qvz~Ղ׆ڊۍߒʉoSM]ACaGG`BE`BE^`BDcDFhEHjFIfDG`BD]@ChLGXlquy}ԁք؈ڋޏ˴|gKH^AD]>AhQQ`CEaDE`CEeHDbDD^AD`CDhLFiN\glosw{~Ղׅ؈܌l`CD_BDf_CDgIF,Q|?[u4uCW]Ͻcegjmqux|Ղׅԅw\]@C`CE?`CEۺIbծ:̨;`feeehkorvy|ՂzqVM^AD`DF_CE`a _>Ы>cϴNaeeefimpsvy{~eaDE_BDgFNaCF__ ^GѱG6ѶPӾ]deegjmpsvx¬lrVL^ADaCF`CEfbcMb;bִHղD շPNԿ^eeehjlҾlîh^rWL^AD`CE*`CC_CGijff%f{ƶT] e5ff®`ݭ\ᚂWلjQpTJbEEs\>C;aCE Y8?nXPkkjjjwwweįj7^BCL->K,>A$<"& 2noMnoo{{6{g~}vhrrs s{{ {{Fz{wx vvw wzzBz{1y{{|zzzz{|zz{=ߎ~~~~~@~{{;zzzzz%zzԅlԁ\ӀӁޕՄ{{zzzzz{A||ӂӃӀӁԂԂz{zg{y{z{y{^|{ԁӁ 
Ӏ{ӁJӀԁԁԁԁ?????????hypothesis-hypothesis-python-4.36.2/brand/favicon.png000066400000000000000000000062451354103617500227560ustar00rootroot00000000000000PNG  IHDR@@iqbKGD pHYs   FIDATx[mp~λބD)TXG :ZLikScK[ZQtUQ:0N:QhUDBg $$|{=?ݽ{!Jnfnvs=>.l{dE lɿ>bB8 6w$==6?_FH3`Si~U,(R t>N!fYIѳ,%)_P,$uKh԰V DzD,k4sJ/Ő0#;-ٯn-.w*-%T,Kb9)YdnKfl"Ezhs&Vh ~ ^5GрA;s2(+<*5 5uGw'"h?W `MOt ;w`ۉ#Lkf:{@NLb_z#558B#ͫ ]LxQw]oбn#+"2[J;u0נypTAt=Cw4AџhYoն3E,{v^v0Ի%xq-kjƱW^EZUD:fM__sSÖ9t/$_)XE"BL_ccD!~y?sH(SBgБ# :,eybp ,`㖥0 wڔd>2̠fۧ@֑7߽+3#yuT2LJœk2, |dY4g/bg9TŦOu^QqsSK%)H @R Ql5Śּ7svbuD{bSf:N`}79Xp"B קZu%CoGo-'I}qݲnw_y3Yc48ܾ4D "V.!)p.N#vJυ0 *z u='CQ`Z?}XXY./DxxDMApT' eLy 'J jB22c1$ ?hW|# KD "EH-“'`7#g!܋H2JABd~`ڧe!$, 4-J[AY"G2=CGQd9?ֻEt]OA)7iOeL  ^(<V`.,+m'_^mydK e6w}4F9Y`Ivr@ )% d!ʅzD[pzN!Rr, ]rUWWlT`+ В 4ǐ;!!`X*P(^ˢM=fS=O^')7 # AX,':/8I;aQ0DFF(֨{p,uԲ;P~򏼲sH?fJ+R/A(u60' Bs@م-[AF!ė}q)_HF0d+V6l~v{4:ma,BPt-ݎ3xxeb6*8D tk>@Am@= 1m+8+~bSyK,X[w; A:DνHN1D'$.jḎl dg֯ 8[w6 0uOećY&J/ pT "udLLPν"9sG< G$cpxz hhvL C:BO)t՟ŝqG8>b |>*,y*.l)_7h[WSaڜwl'-"HpXwYm&D;FPD5:V3M7`0g85 bg|E6N^uPM[qX`DT 4cÆUd*ʰ0Dp&̼wz`N|O> t9=Qc]^U29}.R+י~2n+vD?ȀK&*} !57)./$"4@$+F~P7)RʩZ9SιdT/ĒJ.Jƚjjo`,[i;s#8ȣ:qguٗ_afUV]m6N;njO:SO;G^Yϙ묹WM+Yr)oC8IRȘeʙ.F)g`*2Fv>#we ye(u}7Y[m RS2~;;7$ȻsdZХM\#8j5jmZq|O@`s^l8{Vq[×՝k]Y6=i{0YiN3yg4Rl?sdfw͍H<'OÝ&dCn="i^Y}N=.Cf=Vpyv?Z>r. 
ecRc3\.hNk թ!BVyg;(h608P杫Wʹ~6:ax|-=1=S̼V#cɰg2GE1wsq{=6MqV Ē wu'ur]*ר'wF\ڨJ ғyJdjW,CNnXT{6ޖA:UT` wZC2i((Ԫ?( !o&ahVʔȓ%nSTbbX5f*Yol4/VQ&%ZclZQ}s*"%ԷOz39] _Og6N.m0_ꙶf:PubQ:!&mqVل; qDs[9LǙZ\5$.]jG )`HrsdaT 4\(ﷺ|#"AU^p: ߸Y0y͐jUeG@fmWW@ @ ~hxXU`9&+1G@r*G3 Ba;ӲWmI{{Z QzgX<,A(nA[;t: 5Uo :BS5b{A𐓧@H1YTBT+i|LѭBWY1 :zeAH{Iaҙ `Iu|06l@?iUޣZ[)-J=0yHR$ݥ*L۪2Oن4Ae$Dhl+d%v ˤ_Hx!]@zie S_@fL4TJ&\NQAݢkXQLN8ڰ%uِ ^pA̤-Kj3pyրtu cG>ѿ䘚ېLʚ'|&ɀl60I*kuidM3<8!Dt]M|H+L2oH_ (vNA>5huKlEP1s<3 k(* &S|eH^3n{C18\E ;3#i~dVFRq;>ܙ|mZdsYED J̊MN`Eb[.Fpvn4-t%0D>pT"an$=*'VP_ꩯW%G-)y2!j`<9q pҌ8<ݲ|bJ0pvpJӶ BG5\ tJE0k~W:.};aaQ":q+XGiƅ2Qn @7xi@'*AE[ܠ(n #U@9=L3;T@",6.#g5MKz"NO+=q87S.d(vQ֧N ЯV:FV}zuw=t"nHm8߲uͦ{`vkހ VmK;M$t/Ng8D45!d?\;S5h_.j,ke7Zs C%#8sK5 0A&i -yv't|,Z%1TZу>\guAS\9yo`MGu=-.,S%V:m o|=<1iX2/OL\9'u{ `j"Z9 khlDI2jRtQ6>w`o  Rh6`8o)skkॠFDX"UgwwUg-; $t*uA!?/7qÄN;Np?"xz{ Iy X΀N(Bh2n)*tRT  1D!0'Cw@YPW3SJ) ) $Cv̫A~گ_+h" 6DHth~Zy!,;fOKnaϲ4&/3:>]-grpo H-ϷvDD35y|MyO;Ñ` cӷN^xgM׏L+O.Aݮy/P]˽1eekSm}F{@#EHbm!խ3b).dRGNV@ip}x<Ԡn!:vv*PVdeQtiⲓ` հ לqHRԗKYuhzӪTJ-VC:PCZ٠Q#?X$I6+8֣}*b¨z&8o##zAf*Qν*~% <2Ȳ:+Pb@H=vdWo^]`a;#rGY׾]=MG}с`c(rgAMA aiCCPICC profile(}=H@_S"-v(:Yq*BZu0 4$-.kŪ "%/)=B4kJLvU  `Xf1'IIx{zY!5g1'2ìoOoV aVUs1.Hu76өy0X`YԈSqYY+XuCH`K BA %QEV )ڏ{D.\%0r, Z 7)_lcͺmv?Wz_i3Z.ۚ\'C6eGy) }kno}>i*y^xwogoi/r 2abKGD pHYs.#.#x?vtIME #ʥs IDATxw|elnz#B "ł)*?yT,y{CA@DkH!nIvC_/_wIf}ya}fcF!B!B!B!1J*B!B!B!BdS!B!B!BqLEO!B!B!B!1͢(ԂB!B!B!%B!B!B!BYfDAEQ!£1=y !BO^LT{vpun9&ߟ|VUHI&Tu =2|4]|ctB!G pDүo:thvˎrQ5 Tz $9-lJU-0~)M.Bfx+(R#ֶWnhj =3a!2& ̱8h?`PrG]5䗔Bd\4Σƽ(k2X_ lm3V;vg8*& aҰ^gM7"ď^ևbHzYV<Jw'_>YPDϮ՟w>ʢrKmFgf>w3@侯1N\L]ߵ~;pDG5j -[//G8ڜ[du3H[u52^vׯN;eæD 8+񽒉Ł5|G65y46h);#B_'u@RRq¿ ;~l߱rӛٯ'$Z+Kȳ)*&%:ۻg6J䝞B!a3Y7tq.{}~l\z#IaK72{gUbj|Іx]x:abjtyջ5|eE(H#P,Wad>pcm'MGEtqfO7a*8.J3%o<?>q3S)I=Vu{EwpDTiXqJ;4Ë'Eaq[H5`7Mvp%9j ]!^4 C۞Ѥ Ϯ,GkM̴.:#I6 E^]hTRVibL{ FgUMFS~E".҂RN.q(N5k}<J7_n`j;{r;Nn.g#PGvQ${f]?0_eJ6RXTT#X0ÉaTu PQ5 kaB.('7`u5 ] hNb"u9-A$4E0 -Ծb}N1ՠC_Ju+TPPl9TV%3YC$19UT&%cW0|Ux* oOa#⃼U?Bg /ExM QDj(c:bHi}d#BՍa`ի 
y<:bR~#^M.Q!,xi֦wB!8D*tνqkunij7rݹLۑ(PcRVЍ[7W!Wm)Q{sә5!>o:yR[ԫ镠:kʰU*Ў W]9l$PȚ<ֽljځo+}t6/^_-wVa#jT60cJ2ͼ'`N-y<猟cKei6b :>;.,ە. D6e,@`W3*3'ߜMF\^g_^-sj x׏e~޲_]c/3F1S +?K?ҥ夫dM>؜S}[W]SX ىްm2H"^!g:]4%e(/*xaMސU@ӻ` TRT%Sgq<|z$GEᥢJT/ڵ^EO?#f&[/:ި ė\pF7"zۢ`dcn/{Y>]ӳO"[w@w$T3@xWRXT^`_YbaSԨ<*7pEqC]|H_C Z*FU)ʱphIY ^<:N?<.|IZJwsiW<5XJi>u iTUPYe'J 2Է n-&7[@^i:_;I㩨vdn>1f}?by31ܨ,|wf(A@H*ZƜhq)()OaK<-уIJ$.MS2p&`7(w5純`XJ_>CI'Ԙ[WP JK*iS~'25.kL QByt PE+3}@?DT"3xV|N?hm4L"f,W~#t* r;HLvbz9zɰIQZپ9 G{3|%>:vnAF];1ӵgzZ1NUb$exdنBlOTS*<S^ fn5/yOWh3fSyUٳU៻{p _~Ң|bd%O!B0$Ʌ#g&2)% DqLq Jd|z7%o~ &U<;3N޾>ڧ|x}0>+z|~=ɇ\D 'V [VHН;KJ (߆/ןOoa~4Y 1 qh eT`RZ\IԸxb(h@J f) \ Tv<7Y!21tsz PS:ѢÖuεC"u3E@.X=θhuòm:9|6NJze(J:Su߿ċEt~iOdW5u FN}t4Q*(uq C8s\w4()SIh: ;cF4|奚ԁ6ӻz6bp*uw sMssJKio.ݲ@*==n6n+ imOPpI7k2U#2doA)$?ݒQQ8"R%Ggv>&EKIQ}=? Cv}Ϻ=12> o{i7~^ (7n :PT^zYB!a[cq߽ /v4|i9F1k_1(`cPGQ#XuNCj]{ۦ(}هC :)x:t`leuSQӠ2ƷecM*s7^7 qq'K^{6}TKV 4'ЅT~-5Gfu`@◟烍%fQk Ci=9랛9).?k/qe[iHnf7wZ>+X(`zA݋VlK֕+bĈȞ騱-'HvkyЍF/3j+:{3(I4GMeĂL66ljC `S~.L8{G$kl#:U,.LEmWNs.1=ʡՈ7=ؕ3>NᰃY޽ipFE* 9Ym.3VCPҩ$wTN(#0rWvJXʒmMSS8Ԉ..F k&TXl:*Mv6߶Js|l(xi8v9:v /=ǔe?g?N'C֨[#̂MI?]>(y`W0vodS0Q8SkjX3{;һKʨ]U*tEKٓ gb&*Pq0RyEIMh>5q<[^!#y^C.tkb3?X{L;B!޽1Mi?r{TՏ5]5LhѹTzFi-z%ee(iB ? 
]k3]Qx[!BV m6VX}GV/fpZpZM|oG`w=yW53k6  akSTeZj9LõrSA=M!PeMTG1ˊVS[feAI?XEAxL«<6C֕V[DqUT`Fb Dơ`W#UpZj>ͩp L&O4޵%X fqI#{X2GT^>2o_9RدۅԶfz0@}_N^寭KMkރyM>uǖO> f,]x ׍E ]%X{ŷ?`)fl2Pް6u^, Y?6Cs҈veådTud'jE F ( ?ШO{NXߑۊ"r}Su߿8Íөd'bՒT m8pURhh:*W {zB~YQAڠarV|+T*Cmٷ?C1R©T}o.#_}~(ᅸ._:R,wy0oέM-i<%y^_ϫ7ȫƉrl;B!aPUCTq'OjsU.^3K ߚJ/*@M:mWV UUyh nOZdw&^^zWԲ)BѶ3 80x)t[?層?avS?@n|P >>""oERk]H*:DgPKn~chhUs=/Iߘaxܧ<mcEOߺkj\QJݲKCA!;λ"/@ /OR">9ٯѳpF&}[{:3uX{4SgϞLn,ĀX,(YFif2>vחv'^??ׅ,eXLv09]2:ڼѠ̪ab%e`v04 ݤwP&}S ጋ%.w1 9syji))0Ъ`*0Po(51RS/"_/ekf9$QN_ Z//@Nfy6Yō??x&b0ag_sdDS똂S@Bޞ `N^hƯ IDAT6⵴.sGA+b}ʘ,~y+4(o+Wm{Ad$`zLD.JadJPPjdkC<*X4φ*S$n78[mtlB3 W[]8Go7FO Kdi86Oko4k}Z&~eL.tV<>qPL"3PBIeqPՈ5ˮQ F:/S;i|YO."&&5g,jKC|MN&%!_'dدng8Oofnpp B!D4U#v񱸸8s5(*.bQ=#Hzk֬k2@@o6 >%P'/K<=xRE`=x>߶ZuB!hSu(cUg ɽ2Z̦*E ־!)b.- bP<(W8%f˦TYjS9`FmĜ5,0Òt LA)&eO9lW$ݢ v4*CQ+Z-ק!,jgqsg~v/oBTh0mTI &n~_޽,f t<c`B!f|ڷoa!O^^>v`7+f[TeFM}Ppϰ4|`ژT&WQCvz>3e6l7J5;&n6[u]XmX]W@.5QLs6Iv/PI<ݼ>gY׼jTܿ62ʡD73{`?VȄS0fP:GbSTBqP># t vnR^?ϫԑM(K9cYfl>((F|^l݊0 Չ1~*v|i1jգ=Iqq$,{ `bbY7j)(ҋ}c0v~sm̘<ҎUdfKkDت.[ J>يs-a5+31#XL)*G HD5sG#zeP[~Ǐ|j׍I+'䯅y0B!D,?gӱcfC[iۗUMk6Pv6[1xΟ/s{: ZPãZqIS&B!mvYɠ `+˱k_ i\`/sqtfWw^_ﯹ-=&iRT@KCl| zTQQfy/82a17&LbDRBzKv7mo,z?_U#B!*0tѱSGb뽋3UU))-#7'HTU9*]\v5^;=B! 
ϸUw9䮾ڎ1tvQ{iv CfLgIH\ RRA'inV”%-7AކMdG (+2SP*X3o1{6ȋǤ\-+KMB!iBT<*j{qu˧('&EF9x2}pnj>>&*4>*s0It#hzB!hK*vN}э{Ϲ#;6(jϺrTj;~Y;v^j>\)'$b Tr`V~ deO-K,tiqč@r=6͝ú3{mf䥂-& B!Bb!::2JKK@ZnJlEQmcB*"]c7^X{)M;Q"B!B!B!|:^3SSY4l6E;ӓ !B!B!B;fjX,=QG͸'G1X|dg7`u1YvL?\;Կg/yQclmk*=.~fdŏ1.޿3W'yGѸ_g'GG"!B!B!8YB9(}kߘTTbe|7txvc<"yVb'ndX|>xm2+b;u#^x%18-l'#l9 {X%r0?u,`w¶<=TYЫ]5qnbȦytӡѩg7leU`ʸ&B!B!ǘ=lmxڝɫ7wtҎ3C螤RƷVj2?Npٵ\|`::ؾ_<[H GKaIlj?N]tϐG3A0/;ۦLNĿ7v6c)ZM&ɼ7{:̾.D!xgEknetH~e~tLb/-TTW,N/F+,6Ͽ+J<ξ+0>LO1;|K?"qߴkgoso/5~<7u3l* .J1,{Vc2 fŜCw%˧gIU|PZ:2,tc\x&G(7yaOՌ%t'}F:yB-g俍K+{3zDU h=r>tj?^C'{z2KFt}R TQ{_l-?-aaO]Y6Brc|S{-ՓNbny#FB!B!ma dʭ^wOWhμkﻊkeu[?Qdq$ yW$8tn$O}ac*&ݎì oug~yY5!@ z~)&2䵵mDMky߶a+7Fawec$"XemQtdE26ܭta`ZZ,җ3}~}5OɩNLJDRb,]7K.fE嚪,G@ 6XV}lW҆el!x7\>fG㫧R^۪RM+~^_|ǪBj?μ?WL'^K+i٨pNSa)SגLZ((⚕!2137Hc}NScN&8c }Ν"ks8MgѰfeOH@oa c#@%i .ey>*B!B!hEO3E?eԍӸ66,OY;Uoa,X^C^XwrM,f_e_ýE*(/]r~~EzŝwvsX q91%d\ͷy&n.®;޸v#^fG/}GyxMU!@Ř€1 6ݱIld_Iy6ݬM,k k;kۘ"C* K4 gFS?Hs]\L˹}s#Tv}N.۬[Z>BJ}9ACQ]xVi+u\~}ԏFVyI.U֟L-jtyymiO>1j IDAT:WO?n>9770!kǔїZ>w:G/hvl5*|NXeS~]},FsDwzkL4U4+€hY<庒~F儼q\m8S;]\jS q#|ݭ:pڥO-җMGt0=?p y}6܊mњ8* NY7OV BŰpBqc_^?#oxQ?sStq/4O_(ܱcG+/ZS ]Q2wLLF TX\!oM[TC帻X۲4zj~`Rw|s|T΂;^.zz~Dk_0{ńX]{o@kՔ_Hri/~k@'!Э'n@F [3{<q FQQQQM>_]]-mvY,q\2cZ#QEEf`hq׫*'ofnRrdXd6{\#z^'..WvBGwm:|p^W|!ٞUxxx93"##{yn<!4tP*w:,4 2Oz;, SUUUWSS#)>v4ǣV tŀ,T==;67]tG! 
=çVMMM{鵤Z5_djtX٥O۞]tPqqqn{N @Ojf5te4e4e6exrTSS{_t~uf2d0vvގ \u_d;=rnkƌ4hezU]]b8qBYYY]|@OѮh4*,,2L njSYwx[zrdZy<ppd25fkU49sB)q ~wGQ#o߾xLҥK5p@I#'g͛C֓vm =M&""":@AaaaX,MZ,Y,LF Ak4fQs m =y?ҜbQSb5h4jŁ?I>8##mKTUU'NF*-- ~}ҥK~z]~F VZr/rZz<^ F,K)d6FQ 5LX,x eZe[~t8O?kKf0l[+** ZwfsPCڶtK.*++Siiz/BÇc2)#ǰ~zرC/֩S4fIҊ+tn7MۮBhY-}|umذAFQ_5~F۷odە{VsС-.۪?o-~GlAf7|l- VUuore~zB̘1!4RPPL*|r8r:*..uiBy&#""ԫW/9V}_ݫ81BO?֮]璤_߿|>oꭷSO=Tx@(Tέ5`ZZv %I1114X5a_FVhGNM>o5g[eaB=^~W?68JLLldȐ!T@7zvUZZWLՅ Bի*--R߹]VO?$gц !?j֬YǏ]t:zN?c.+ӳ5sz'_懇`Numtϖ8F{U?1 Xm -o_[vC1:.];Cx]re˗/G+BsIii***tMH.YJ{ڵk]TRR>H{o{***Jrrr޶gKC` 0@}ileff*==])))4翿^CpW|zV :ek,lkMme6}Y7impؖa^Sg{Cˎ mvd䝈г_|QgnԩS=T^޽{O>ݻ"##իW.Yލ7*==]/_V^^_Rk׮͛7(n{ڶ=:_>ǎܨPvv8xp@sÆ tR|F{6=Jغck{%8[цZS$;]~][lѼy4gΜ&Mŋk.SQ=hlV^ԿEGGkJJJҰaÔ*::ZW^d6wk׮?^ﱯ|+ھ}&O,ժK駟ȑ#[mOϖ?m%Iyyyڸqc^3gTZZf̘x4XhƥKaÆf *!ll.OC[" WyyyY,V@klXhoԮ0BϦgwnuQIҥKTQQ~XvRFFC vl6jj8*--U^Ç+66VIIIr8䳮15֮]<֧OIy6׋?lnX[)IUVVdIRJJJ`X[I*++Ӿ}hgVjzA%}]zPM&L&SBjEFF8ϥ76+ՆxM y5z{\C=zTA? '@d0dX _W_$ܪc_7OHƍԩSUUUUWg]NSRqqq}:/uJZ YݮʠzUYY @}hfkh0膺bSkl~m; 3FY@٫W/իN6zzsu>ogSJJJRjjRSS޹Dim=vdj6e6["z<jbTnot;Vͽ=cȇ[ڶ}&Le˖i*++Ӄ>(O>aM&=p8tsN6''칽=n233۴nՆ 4yd 6LѲZx'bhFz=C:Y,v7P{?mƌe˖)33S~!,\P555 X,353]1wNُÇua tBϦ~b6MԎ;x^IpUVVlPš`XTSS7ª*EDD.`|>cm-_\tL&-ZH/^vJ;zT]G6N+O"ZҮj)OL&"##UYYosnos)bRxxx׭Quuu <ۮRo}߾}ҵkר(n,99s>Ӯd2Wf4TuuuH t/I(.rԨBaaaA p8 fs͝m&11Q_WZv*,,t~~}Ac4nhBzIv=dSPG#`0jfz8&ݮ񨢢BEEfA9<jjjr<)ӋnuҥV |ePqD^^Ѓ\x10ZC .t*-- N=Oȶ|>NN-GMMM>ϧ2Z]Tbu*++]#"""pHu =͛7@ջw@F [#D[jVZbRxl_E!\ӒJ9=;X߾}_~ fVeenܸ˗/ٳ*++?Kгg.ɯG|;"Fn_o["o\oǔ?>}j5z&%%)11Q8qΝug՞={tQ͞=[.]RnnnǾ 3fLFZZݻ4w\:uJ{UEEE-55UCw|!3]]3C>(}`+mf 4lB2~{kԩS UVVҥKr:K.R4~x 0@JMMUaaBZz衇Էo7bOEEE*((Ѝ7T]]l6(>>^ɲl;v直('{Ȑ!Z|>;z6%::Z<$IPqq1T3e6:n,}6z4|ueggúu$ܹs:w\`?ׯWLL$iȑZt^y啐i̘1ze2TYY#Gĉ-E{f͚XBoF{|.ZH&Iׯ_Wee,X ٬޽{sX^aaadž!Ch͚5 exx=rѻsG~A/_}Fn˯)]CwZ:@n\T~k}|lyNGQMQmb$:U߼Kߗ[gVM'ɞP/~ 7ܗi_֋ɖ/}O_/E7UO?ݮy >=Wkj8g޵kׂ[{E'z'GJ֏Q} 8|Z1ܫ3k=nG*eK;++o{?ߪX}m-o%[ѭ$a+F>&FO_TJǫJzGZޠkӟ7?==rŨ<8F:J_j‰W??ޣVI5<+hW]u۞k֬QMMM׿|֬YDEEi…2 '|)ZpRSSzfggPxjj}Yeff*;;]cH=</d2Q`С/ԕ_ 
ťS-eD'z%rk:54R9Trn\&^_5Pd=))i9kYu$qrZؿA9IkN~ҕޢsoޯ}[:rG о;]Kd&=_y|땩_v%C U՝ ۶Ul3&&FGVbb'&Ir:~u̙0m&M$ݮ˗/k߾}Vnn9KCԩS BOI8pVXѮ3L!g36rUl$Ճulo6iƻ|Oo}*,tw>*$KW_JuC1}[*4jƕA_ND8Z?~{^:}^zN*5}y /O2wZ7oƏ/$fY:tf͚zr Ç$;vL>]۲kT^^3 XO@=C=)S SXXÇs@IrxEZ~,Jj֥rHuD^O2b n[T~u~̯t}:_[?Mz\U{B_#Ͽy[WY1},Upjm)csO!J4۳zgt=4x68XQs~@OP$UUU{[ƍ$]pJMMm4ڵkHٔF@֮]:tTBBB'$$:$"-\t`i3טi5/xA+FjB|= J;S~U\I~qC:(R}k>G}ҕ[h$4*!FaJ:k9~[9ÚgؾJgvl<[_>zemJ:WZ}.<-$8kmxB7@c3>wz[+#澩Y E\}~ 6}WGjym7ƎVYQ+g西{~4Bɤ+ xi=o: E ypŋ4iMH%&&Ay;yd|ڶm*++];Ϻ}M=y$)==]E/VDD=ՙo\~Toٮ޺kʽ]xBy-.C9mzcΔ@t C`EF.y S '$_o _7^+;5D5F6vZNl&>{^36(.#GPA Д)S;vKv~NpNIfEXeԑE9:y ~G֡3~N35{u;kU_ܡW_ڡWoRI\o{/*j7m{mV/BC}K?^tQY>Judb5/Ev!ѣ &ڵk^W}Y  >\ӧOW||nmۦ)_|M6=v옦M&I3gõ{vKڒ=c*(( ˸o^gԢP#v|g?єe-O8~iַɕ*DG0O]Tv-zT_?:JrĝgLn&ɵ.쉮66{O>DW~~f̘ٳgw̎Mz?COヲ]-aEjCO@lP);w&LuCJo`0(..N*//URR")I Wll dI^[(uܳg***4o< M2Eڶmo繠_Pk{ޡݑ'VO=>@ݺ={4h ŵjWjϞ=!-˕+WT]]-á@)IOL&FmܸQ.W X{$dک:|%|RGI+ÿ΅壣ZN{Iq VtMKt0oR_핶VMA-5&Rtn'l'ug~<|s2LC*##Ceee:|pDŽ0zNOө7|SӸqd4]ĉڽ{wzNɓu%&&****PN"өJ]~]EEERIIoNRyyyl6 :TO>yUUU*] ^<^I*ѹ};uNkISVMIe?Q|*%pMC띭jMtTU]7Ϟ?;ʕ+jCN)I&I:uꔊ8rc8p@FҐ!Cԯ_?s8vuYvXY222$IGQAAT^^5kvz[oG}T኏SO=~[eee!ߟfSrrbbb$InR~~~5**JSJJ$)77W{wCDXbևz_$# r8tbSפadۦGG fG.LipUӺJD$&R'yG?`0j =һwoҪ9=C3ÒLzN:x`^n[vҮ]j+l7MWcƌɓ'tc(**ڵkկ~U}Qll}Y;vZctZllbbbtرQQQ+YV)--MZf;.:2\zV\yG5ehzq]jʋUsڴ&OmޤSAAWōK?N+je{p>(C' k)烺誟=9>Oe誟?D] 0@ϯ(//O4h  >vׂ?sڶm[w0`/^_vuM6[RmGU||"##O._}$'' <LJNNٳg%I͓jՆ _>=䓚7o6mtۧK.qu˸O\+י~3Qgv%6HDglmj@ǷԿ{ݤڥϞ9gZp-Dtfm]2eUVV %&&JhOЦDDDЄwVI&i֭2dUQQ:U~~&MWJmEee6lؠ˗+99Yv]?6oެϷ{!m[z.%%E999S (99C}Æ ]X@]p>(u 2rl޼Y}є)SOVVVV?lݎd04fIRaaak,^W7oVUU&M$yCӭ[s7o Sjj Tox۔8k)((@vzj}VRRrsszjn*zT{iǎ<ϟO*S2e͛7O^W֭k]ӕL8p] vgtJgyF6M/^T;ڵk>^_W믫4d1L3fp9ݼySYYY:y򤪪D\c]*F,$INN… իWzGDD(""B 9sۧ#GшШvu{lvOiq9#[P ۔p-X@]r2 S,ϟl=ܣ ZV͛7OqqqzyQAVQQnwǛl6k|r蒕%KԩSںukaMMЅmF|k>=<щ'tn+11Q&MRDD$iر*++'|+ յ0!!AO=To7|=SJHHYak^{MZFQZdnݪ={4hfffڶm[77NRm ٔ'N]2LR7n@~z!շo_^fΜ?2SJJOZZ(--M)))WII_xnڿG9sHҵ- }ҥVvvk֬Yڻw"""裏n֍7Zƭ[k׮uz򗿬ɓ'+&&FڵkڴiS{;VcǎTsM֏օ 4|p`0j… !9ݺuKXd UJJJZd&&&ˊT;r\LL*++%.sW\QJJ """TZZ*_P۞>}Z3fPdd.\"ݼySRm{ 
Jٳ!+UB2'hӘcܹs:r|>_.^;۵|@yK.rڽaÆn8q@ri߾}Zlh46wBOá-[fӲe˴nݺ=L&~al6|>mݺ5m6Tj֬YUqqۧVonڜ2 [niݺuz { 9rDǏپlͬ,y^FvEGG~HA)IO%I;w"Ν;W$:tޜ\ZbeX+VYv*..Ͱa̗YPPSNt?u-//jۭFHAPBB4yd+++Kɓ'K\}MLOOoӧ7ڣ0==SSS#H{9mܸ۾}o{֢E?{コ{Կt*<<\dق^NW-iUzyf= Ӓ%KoꡇTJ{! jj.N޷bKix@FcmYѨx г_~zꩧ]fݺu*..o2lٲ@x!}g5w\UTT<B!C/hq͵&]L~$nֲe&IڱcG͹TPT0xƍVm-n3y:'L QFu/I*,,TFF!ewܩtu'jΌ3USSë۲ٳguqI w+IǏٳg;MŹV=9=oG=eFau9b{r$áM6vhc;y\.$)22R˖-k6$NOOWJJJѣGys[WܹsZX;w6C3;;[fΜ}ƍڿ.^v Fg\+Vл|veZ3ek ߶m[ YhΜ9j}gy<  0@3fPZZZusssyCτ͜93Л@QTTT`(GV僙%u5k&N(ۭ_n4pַ%٬?O3szJ5S|> C  Ç… :нWy<UVVn7:mDD.]͛7w|辚 =GK ߒ5tP\zjc=F߼yΝ;׮ *g̘!(өӧ7jf̙3g(϶ &l [XX={tqoٲEeee:ujM&,_]]/vhnhӧ%Icƌd 駟`0hʔ)F_hp8\غ=4f ڜAl{zv=g̘!Iƍ;||>ٳGO֌3R/@||>/GQjj;|^@d虚u˖-:$)''G˗/$ݼyS|JII v]:uT lIG o{-8qB| |-Z+MIIIɁŁׯ_oqln{%zΚ5K&LЌ34dȐnPn6nX/;v.\ȫ.r:lZtN:%ѣGKۼ|[ԝ=ew3T{a۳fR~~~lG #F|+.dp8àL&?^ƍd Cn3T==kjjۭ?<{N>S^wf;wNׯWNN\.\..^^sq;3C۷TvkӦMzWuA^w9}T6qqqSxxxzj\@в4sV4IRUUk׮Iw]TYYǏkT6Gׯ_`JǡCH=tk5BO'n@F [3gh-^X _|۷KԩSݻwsUVkӧOkM>/Swv/Vaa6nܨqiɒ%ڰa?S$|>ٳCW7o.scbb,ݺu[6[n?l6ݫ]e\.=zTgϾT7iӦvk߾}w ~ԻwoM8QWvzڲeN-Gw;EBBM8QUTTtGfڴi;wnzyם8p@/h9V\hW}v=j۶mw@bbbduA 4H]|YE1116ksXv\Z|Vkz***z;رcztY^hTCرczW=nr-ݻWR377W+O3xJhԠAHt:uV-X@RФ68q&Lo߾JNNhuej;|߮N裏4`1 f)2e L&Իwo7i:%􌎎VYYY]\pAJ vx`vp}_o6pw0vp3&pƍcBЎ0}t?7Nf'xժUɗ%@PZzNUTTfRzz$i>}ze\vsL>]G5tZ`*{&Iѱc=8qBǏdĉ <%iС:thLzzfv'}> CamZAok04i$IRUUFݹs%I{VyiѲZm.lMMMU5t k ȑ#ջwoIRQQTYY`mٲE< IDATv.\( (%%%`5:$ɤOTPP˗/7XnРAJLLlv+!!A'(333jժvo VTTy5M='O,Iq\.M7o*??_׮]SUU$jv+;;[ÇװaÔNnnJJJ\Ly^͙3G?{wT՝u7EEQ$-FFnQ'1tL뚮]SU؝b6Q(JD ("vaqGp/GU v|9vM'Npg֬Yг}Ν(8qbO8ۼy?齘>}L-X@*􌎎رcUUUEDDH՟6Md4ˍQ``ܹ ѧOdtaQxxxZ|ݻӧOK'zWgjjVvޭrEFFjĉwv>Tqqqշ~;sr8qBFyyywO\xQgK/T;n8L&YF|'3$$DqqqoT^^.AYZZ0)((H^^^2jjjRSS޽۷okرZj"""tƍAm@BKO:q,L"ooFf@[gjju9m +UZZڠ͛7j-[8O?aj7|oiܹs5o<:tHӢEenݺ `1vOǏמ={d}CiԨQ sم+--Miiiq7nc}@@wuܣ;((H.kgNNrrr+Wv:\pi&1c8E_}y߿%RWW'yX̙3u[vV-99+VãOǵd2FTUU"Rْ5kxuzzxxf Zu-( eӧ%9gΜѣG?}4n8J~mˎy+']QQQ:uԠ̙3siӧeKs JKKUZZJG`2,++S[[۠La@s̙32 Tff;q e < =\rBW}ڇB 2F2BO#'F `D#0z#?O派\ΐ'?ƌӫ}٣\:`uzn޼_e4PTT$U__k׮}Z|"##UZZ}}gΜTIҩStܹAmK'IjjjΝ;uuZtJ͛7UVU555RQQр>JϡbteM:Uk4rJ}Gnk+I˵}v /8,Yr KׯWVVN>͛ ra:"/.bbK 
.p111JMMծ]NlkkS~~M&nԚ5km6544mcǎ~q QѨ (44TPkk렶g֭:2vlg;vlg;پd TU /wqۚf%$$(##C ʕ+e6uM}n >Ν_SNh۷o룏>{kJOI*..ѣGaǺ_}Ѡӧ&Ir劾+555x\mm__m Sqq|||}=[nզM4ׇIa 5gΜ>{I%$$HMo]РS4Q ~fLGvL+`}׵{nl6Ǻ6㏒T}ͮ+,MNNv|~TT֬Yt)Sstҥ^ '_hΜ9ڷoڪb۷OsΕ㿔СCW}}:۷o]ڳg4a {O7np[ok_͛7J7`L;vPKK#ho~!t4o~Xn,_ %=vڵڱcZ[[5}tsi޽{WW\ф z7>>^K.Ձ\ҖJhŎߛu)33S$)11Q/m6UUU zrssUQQիW+88Xo٣.ill5|gP4_Qӟ~{BOX&+?'֦ ]xQtNՖ.)ӷڳgf%=(e]j]ES>~H;@)))v?tUiiz@"##뫆fi>}$硏+<;~c<<=e$//yܿ&<~Uz655O>ѽ{:ڃτ?vKfb;jŋ~f]VAAA1 C]w\Gb &&ʳHw =n+=;kg4; <=Z.A .Գ>8q}rssqFyyyu֬Y?PMMM!IHHЅ zvDyRe~XV~F {)lpŧ,YmgoUVVjǎZ{BCCfLnl6?oN?ݩuyJ222zd;wTQQ/:x蒒ٳG=;n8ꫯ5Cb6knz_5a„^WL&ZC%///*33ӱ.??_!!!;wn'''WCBB ݻ~@q4`'`3f(99YvRmm^/BWS9>_yM<]xQ_}ՠ%--Mڿl6$:~{Pd0njjj뼵2 @+VPtt$魷ݻuu]zUvҪUz=dRJJKC~7ء$M:U/:bEFF͛ݽ{i $Cu$͚5KZn:sĉzX+++KOVQQvϫW$X|Xː{͛{'!!A6lnҥK:uꔪs*))Hg-]TSNoQ+oyǝ;wN/l27N!2 +Wx:0;I>m[[['~H-]tXҥK:z-Z֢E777W5c 5jTǵh׮]̔nה)S+ 7]Wznٲf_EFQ:Ti'=R >}~8ݮӧO\[o@=I֦Vyzz*))IW\QKK~]{?Kկ$InNϱcvXp۹[[[~ݼySK.Uzz&LЫc\m۶i u{mݺÌ7;544Su}TxJ999ڿCQ)Inr{rssu-\RSLqUڻwRrrrx/,**RQQQ?/j2 >nŋڎf8qbڻw̏}믹/<tC㟵 ?tcH=hF4BO#'f~.h4Ӕ)S$Iʒfi,X3fٳ:v'O\9|:$IOL?iGæ>L&f͚E N=hT@@BCC3g:3uT!!!5k奶6ݽ{W?rrr>irLJJ92ldxԈ =M&̙tIYVIFp:Ntж JOObqX,Tddf͚{mINNrɓuQvp{" WՍ_,Yǀaŝ[7olٲs=ٳgwXwmjئ&UTTb(<<ö3gѣ.Hk=V+ZV{.^vZ~}l߾]}3ӕ0ǧWoݺ6mҸ_' zT%*,,Ԝ9s|ɓ' IjllTll%ߺ$AϟŋСCpw9B=Zׯ_$ƎJϩS:{4ZREE***tUVVV҃nϟҥK.+d2iŊd2i̙ڷoжovǔ! =goӦMN B ~w1ZT^^rUWW;fSffL"tx۩S*((i7teI&I E]pAv]/c]RR<R3gΝ'Nvxbv*=!`0P0eT fŋ}T~~*++ <v}IK_3f8///u9;wN۶msKeeRR<<<mmmŋ;.{Fjʔ)z5o< LO. 
ECϪ*ڵ'/TYY9(=v.\XpBmnn$y{{sǍPW^P]֦O>D۷o]֞˗/_VSSZZZtҥwL&EEEi̙1c^xEEExl!xѫгI;vPSSSOҢ?QY9VU'Ot,w.ϮϟTOiii?~+􌌌̙3n2/htOum6rssSkkkF=m pNUrr;Rxxzv… s A'04||XxbUWW;=7oO{gwƍE:Gs찮=|||(Iɓ5w\]ƍ7ߨ}<<<:4/:p5~fPG%=(3\'\zS9´j*I+ݮ\k̙;vvv]iii4no ڶ?RRR%ZСC×/;9/L_|QEmmm:t.\_~~5sNS+t)kXa]]]/~@rRR<8EmllvkݼOU&g4U~zXBEڶm[g۷o믿VCCӶ m+sgRR<<<:ssrr:,wzUPPЫ*..ֻᆱ7osj6lw*((H6m`xQgVVF&O,i`z;:f!F}ᇼш =m69#G 6zS3 5:a,0F2BO#SrU IDAT'F `DU'??A[F bbb =  ˖Ҕ)S4uTyxx b4W^ˇEk֬~3`3wq̘1۷oKuy̝;W͓$͟?_vRii Ѩ %%%i޽n?{+Iڰa>Cˇ744Tsј1c իm χOIի32LZjT^^\Sׯw 6 ys(##Caaa]zuHW_}UQQQb]vm}љp]V~GEs̙3;v]{Qkk+o2ݿ -6|g%IْYf 겻̚5KSqq=?ӟYjjjUX;vm׆ Zx>Ѳe-;;[eeeni֭[i0L{hew2e.\(IJHHPLL>UTT(;;[/^^PaYPPO{xtF=sJMMt{II֞M69# =+2-[&I*.KBB^z<<REE2lkkΝ;U__ I=Tz_^K^{A]v0]Vf566?_d:]~nM1;lVzz<<<:///ב#G\~GOܢQCC$)77WѣGeGK,rfs i[Gvy5`hjTRR{u{\ii,Xh„ :u'NJN{0/X@ӦMs`kϞ=tҐCw &l6+88X+VP@@>溽M˓juVZZFL&$O~zgϞUcc,YիWjC<111JMMttǙ3g* iٳ{ YteM>^ժض6}WZtfK<`NmX[\9r~!PŢ3ft-((hoxuu k׮n9jb*UWW][y$6:&&FN;!mی3鶞qu;wΝ;n=gߧ7h o `D#0z=h~d6+99Y111:vjkkG܅W_Uxx$i߾}pO0)4 >}͛'///uk9w\bl=zTAAAZnF%I:tx5JK.Uttjkkuy;wNmmm!zzhŊ$8qB7otkcgϞ-ԯctIEDDhݺu$;vLΝꞸbhڵ $k…zꩧ_e6z4eIRnndx3kO#𬪪ҩSNɓ'5vXmذxfggԩSX,Zf<3aAAAZ~}j{cJMMVOzІ X-:".z0,,L<==%I?=*tܞ`24|GEc^^;斊FMSss|YfX???%%%)//%mKIIqZ7uTefffz =.]ڡx∸7oj߾} ϓ'O*++KfY\ڎv.”a]EE eOǧi`ch[Oo:sܸqvEEEjkkVWW;wOׯl68 .OWرc]ޖG}+CO;fʳݭ[\Ƽap:x}xN'nqNxLtz.Xi]qq񰿨6}jiio@577kݺzFW_}UCFݵ"""d4ӦM_QYYرcveee֭[i0Luz+**i++fRٺ}*ܹSJLLԊ+v EECwjjjd42ݽ{JT0_gkZ4i222oݻwmpӦMNB.CG炔T "??_Vcc$iܹ4io>l6-\WC,3FǏnիWuqP`0뒤S* @4j(R.mcLLL7NѺqㆤ.Yccccuy`tzFDD8juZo0zjd29mokkSaav풋qㆾK5JK,"##ua}WFFF`Z論VWWQF9+++UWW'ɤ4I[n)44Tҵk״~zӎb(ި(\iU矫TK.Oz]gPPӺN5JHH$fs%++KA!kjj+$$Dz; ;#""\&v횦N*$ਞ ۷yp]ǃSSS5o|X9sϟfZuQIҘ1c}}}h"-RYYcRWhnnVAAcy VKKZٳG1cjjjUWSL&?^yyyjU cǎ222av|?]mmm:gϞctBYsp Ţf566ԩSlQIIy YFFF꥗^t`kڻwҞ-ժw8oPۓ'˒ԡ7bccsebY[[lhhgOZ__?iƍ:uJJJ"@?^JLLtHWWHH4nܸn ՛o7nHUUU.vҤINS&NfGgNN jxxURR"ݮ%Kب(Y,&< =oݺy{{wX}|t̘1ZrеkhG0e04{NF=SCjjuڷj;:lVTT]ƛ `8@'@?uz+%%:ooo vI>֭[jjubwXٜ>s NyO?cЮ-22RaaaS__ .tYiZ}$ׄ҃!n =<-K]׮]SQQ%Iuuu/ǝ8qBgvfwZZZt)u /bIz0Lŋv###{';;[mmmX,PkTXXm7|OC< =v+ 
Ց#GTRRa;wv_IҩS\NJPPf͚'OJ4j(퍍&IjmmUAA$j555FQ˖-Qcc#o3f+44U]]^Bn… 5m4ٳguĉ>}Ɯ9s:srrߺgϞ/Jf͚֛\PP v:O$}w*ԇ`ͽ>ƌڇ|2o3uuu$N5ȼ"///yyyY}N͎ϙ:u;l6k…Ztf[orkk>ݻwi[ee!}4gΜ~˛ 1}s<>͛4iROx5ԩS3g߿_U//ǧϕٲe˰z3(%%E~~~y>>^庺:P6<<\ъQDD/ 00P-Ziٕ &>,nD@@^~erQQyB 87oٳg>>>ڸq"##}|zz,ːɤty{{Kjkkgvad2 t03s񊏏w,'&&j„ nk|RRJNNқ+**Jdٴ{nݿsYxz-f:`wza8v˗/o&7zP>gҤIp /,,LQQQ vTYYRUTT(!!A3gt7ߨ'szg秿/tAtSCW^yE~~~NrJ}'.͛jiӦ)**Ju ڪFݻwOwՔ)S]tIΝ撓_0^Ϯ7hV(W`u=66VSVVK3SRm(--Q(OOOyzz*((Hƍs Z]]}DS[nM }3!!Aiii=7gM8ѥ96mP뫴4ux>=:pZZZx"M6uXq0L) +/_^U^^>Ϩ<ӊoWTTO?t0ѧ_GԒ%K\/B[nG]zi@CSI رc{U(EFF*4qNӟDg =#""4a„> !!A.]vҥKUUUھ}UWW/Rmmm)..px s;|{=Cס}a{}v5448ֵwtUU{ 鄚ҋ((GEX_,#*##vbATD((BJB {$&!fk8exfcWJJJ}c IP@@od6|I![MOl޼I_$,,Dhh &Z/''GsUNN\]]e/Xem޼Y<wwwT[0*OOOHjz6i/r*mOOz?ƌSmb0j[:{Y@xy(kt{zznWپB͛7O 6|ҡgbbiСCZm5|͛7 ozC:u* <+s82MSm۶!ag=zj߿_Cx 8q$bcg=?P;wf+Ƕ 3=B-\BQ#7q'гyT5ssĉh\v.=k,o{W^8K=k`ժU5:oΜ9 8=O`ʔ)5>wРA 8=u'P#l!!!gTc3yxx8pFmsrrr9>>>8xy& WUEs$i̙*))iЅٷoim۶ed鉗^zJJJTRRAjҤI=-YiμcfzN<ڴi}VylҤID˖-kϞ=> VfR*=RY`ZYn4~x9}|Cj޼9O8qbΛ2e hlŻux@6M|򉲲Nʁg=dFsLTaaN8xA5 FPY n=x`r-Y";5\5ko>#u]\sM9cƌј1cʶ s=|s{Q6 sP?kܹ={bcc)@#uV߿%I-R\\n ̘1vW]uUV۴isг_~z7]all:vxܾ{|…2MS_}V\[n{5S+Lޯ3bʔ)e =´`mڴF[aaa=ԩU_| C˗p -jW\!IJLL Hg۶mzw~ 0KjȐ!*~S.][֨H!p4 UZ:Ȩu,X}骫Rxx#AkxnWBB-Zm۶VW3O]PuTm۶lp8{Fm׮]uCrr2Eh`LϽ{jRe ߋ@bD0 ޲P'zTt}}M< lrUh6buk(wgR99MEzvԏoGm͔CO>M35 jD >q&jS4kkVI=4n~CMhէe<-*Hhxk!rޫ_H$OWpͺC+y[Tppz7} i$/[2b7hdz@Qsy;OO04[jӻzI:ɚt*{f6,HVǫ΋uTڮzKk#$93US\S ??nRͶ,9%ɑoӮKVnL;R@>mRN]4lʀtڰY֎}t*Zlo+$7N[dȥ YBdh_>xU;.I2UMΐ#Ͱvrz!:.~w\rQd\qFyNt=JN:.""}uCWT2 5rjJP^n|^ 3K E3-7CMI)KH\سLス^nwzj.1ztkwϦZhKY9E laIE+5h)Egn]Yӽeb(c{zm ]{wvqnPgR6R"_NVг87WŦ OC6%5XwW6;י^z򶶞5o|rsTdJAe}I ,Qn-bs 7"+;W\%>2U7{uk&oqd95!NHP$%:MR~Neef4B"CNNӱvS΃tm +Vj }ݮ.膮[l9-Mdܮ}cYBBdrf+%+[vT9J$Iim_Uzjwz>iv\<"~̕M妧la S3PӫTe *ffٕ|S[]O\ҝja9Ѥn{$gKvi{?ԢV?ھh|dOw@͜?7.s(YM]FfFRi׶.7.9窙H;6nW,GY(!ǐ_VGdtJr_TPS"IGjJ-zަ>_9EZb֯)he\hJuRKK6٬,6 U,SrƍyкBjm(T.UҬG~^̼AîH]ZOI6ZVktW=ok9STzm G Յ\(=>UUsu庮tW))I^={*`,YCs_!sM۾qul-Sf(/p&~ol}Vƿ^ٕ_uIR^^l٢AQp@8IG2 %P 8Kx@#LOZ}Cϧ 3=k5[uwXp,8x@CUeYxz{{`.-g@I{M4@Uz4$oooUz(}/B 
IcKD'JÏ*OIEz̗u'*<<,Q>>r/<48%*쨻Z_8PZxE8U7P>jKn6SEwPӰww*/O~,ҿ{j03Q1[7+:P5 ׹̜FʹW:1%!ꞿFG)ɵu?]!M?Kk T~}tNk:vR\\4:Xp>Ov^JNUᡠv-]bSaȐdX,ZO<aYlΚ4]C]V%%o7p(O߽aOX Pf7&{"w.CMo~]3.XQW&h9nW-G"WGFkPڨ○Y \/ kgzp$k;/uOЗoLrdJro3Xd?FN}]Jnm5۩3C>'ٯnֱh~zc,%򲔕aWVFLޭOF]u |S.ǔr^y F+$V55D֬47\.mt+ꘗ4ãNWzAkh]=Tl%A|8Ej@Q߰je٩NOu7+Y:kӣ ]mO?r#.eT=yIeF'Xu^iVL$9.T;5ztt%DMY&~j( ]dJHFArשؑ_JW;m5k|ktd5ךuα忏LmpT~d$9$KZhOґfjm)SMlBzB>9^1wK?j}uc=_ʸ=ꕟaV|׹d[91Srf)/LcqǩkvtieSqoi-3W)B }t}9d3M?8@iNᒲNT(Yk0$37G$׀@\ɋ<21T[y?nIZYoGjIdNFCIVYa1\*4S~V2O)fzOЧ,V);uU18ǎS'hnlM1zp֙UbR]Z!MEv~$ikf$<ڋ\LMSaR1aumvV/ibϭ_hoqkm{ %k܎*ru.RYcȽSkt^h8zHVy5s˒5k4س\ֿ]eM,h`7ePQzrȱPwe.8C2#!gf*"8mEh_.9M8BW*qk U旵.PE|}$eW<83SM\zʺWsF=Gi;DP~a%%U,)&X3,LUJ]JIV⧋tw[K֕ :?1KY>HI#||yXtdXe:}:]tKR)Y\j+Qf)P#Lv瑗=ڑgUmUu>1s3eKU龆Ph"-+zSoPFKSu\m%I=?:׷RqZGnCrTrq]pI;[ioZ׶X$3z^CiUGveiN)ڟ}*)rS,U \■do 4KݣaʗڙFns+7I A_nUK(w6?iڿCiOjܶVԑ( 4p3(Vu]Ojtx>v騐b9vN&~{ݧV4 z3||cd}wkȈ~Zf^wH.jxϱU2M%׬UNڻpN7 QUwseҤI'O\} '=___ >\_}222(p*lɟrWM[צh~Sv}x5;@C)Z|ȔI_̌ҳcgsz\`>x B k*{~%7GUI;kn:28Uq|׽}-Rjrgr% 1lW~}GjE^#KQV)vIEjCcڧr?e_om)}_O.{,Wq?37E8OwcKeUJJ$3cR22u˔/u*IՁ]5S)t0=8vKuuޢ KmF^[^+~Rs:M{䐫wn-{U:5k?ǭG?bҲ:7U_kpi+_Z%IjC tZ&fs94fzuٳ=vwTc}$OOZ]pڴibbb>Ϊ`Iƍ eddhegJJ%KHÕ,I Qlll"##V@pBӮRLLW֭S%IU[$IINUWG Q^^1H>Pu}DIN[H9*0v5{??^M׫׺7I [Zު7^=Ө%I^^^j7xN3'77웞U\uIґ [cǎUPPLӔƌ#___Ԗn}+}31u\.3U0] zz@fOHycr}"@H>FYnKgi5'PgUY R,ozkر Vjjf͚#F(44Tƍ_ Ԇc>{>XJ{Jjtk=w[cBmx LSNC,):oHU;ٳOxJu.4' eBBBӧ+;;[3fиqBqt)<} ʏߧ|,K65 <zںik*>I h'o11q yyyz) @#szBOkW~]EDy~~ZOnG*vzh]9 rm_(*)w_tzn֏Kw[AWS]'Wת/56iڳv!幷ՐO ٠ 핃4h'|m*QnCٿ%4s9tɃ/ ifizk%: y3Xcݬ [{)/᠊?$=`9a:7zkUZkFϿq~~uf/^Eqd[2%YT,@bcļ(}ѩSn{Fil#[噒mU=Q!Oݣ7荨-=15At}ku-nѝv͇׮_ <;KK {eF(;CMZ"@uN_iꪉ&wOwZrwHC9/6 ka_oUoQ %2WPQ\,WS pQTtL?g?y~hS7'T@N_ifhͻ4w.YU`\yQe˖4M]V6MaT˿[@mwu1#D[|)##C3fPvv6΂\ r(jժFxW...}GGQAA# :JrEQQZ@ī׋bWBĂ(TP @$$d@%$~!;<|g{|ᅲiZjU𫯾zlϲnɓշo_͞=[(x|[R\Zn-ͦ9sx 饗^NExWK, Dɓ'SoV3= ƌSO\ ˖-SLLLΝ;r^ZK.P_[yJ>^p^,@yF \#Pz(=k5%-"""*5BO(ǒ)5jp8d5} npztr)77Wi^6'@Ə\|PW}斾z@9Pi5""8'\ @ L``Y~.\X.۞[xxxSe3! 
K/Uŋ0aϫoNNF'P eaӧj׮]>IIIꫯxվkԨQr:7x=, -+]Yj$Tz^y>KH=T8@)DEEL gϞj֬Y׷1c\.ש k4hʝo}]f7''G2 Ci.7 C Y,)4vr|Ӡ7jIyommϿif9' MVwͲUOeѣKرcXF Ă `JφaUV2MS6mbv5ey{{룏>c=VVlVA|}|y%I6?[{08_-̳6B;1ZM dh`P2FnlUM~TYEy[}3喡jݞk-ꙷc[=tZ7 ;3m(Q\SCmG*azﳃ񮦆(+הM2׃iꪫKMm۶I&O\8SV tLkW5{!=14O/RGϖiV٬7.XzԮ];͜9u= СC\>ׯ… _oOKӔ_zoۢ/SPeA]QGL+t~χGU2~eY+[,ϵQ0^GbM_~s]+ԳϵJ}Y<}ۭ,uwGϧjQ\L#HzN*?3S7|_n z!u[MAR؊iS{vQ/*zU IDAT`f*wD䩬'O>cȑ#KX,Syr+;k.٦[m.pf%B?Wbyb:vFDDs]BO@ԿRqqqZxq t+㏃?fhսFrlԮnQdy:PMiw6}7iە⒎_oޠu(Grq4] SvI>NU'K;菿^Wy^-pᚙ_P ְ=)]+3ӕ_yھEnD~|y-KVu6=>8QNLUۆi]7fN_䝺7b>~͂pؖ7vdwRIUǝ/ӫa}TvIJJW_}%w!ѪB}ʗYdfjt[jMSe>DyPRΙwn WGַӾҪO* {1b::'%|Jt+uVV`Y$t)35ULe,_8n;Xz]Yq<$^FW/Px.TWPvė.>= ۷K:`aԇ mJ۳^>YCt:$S<_@ DϞ=լYo5c \m(`?ok{Um{GJؕul[i[RfSWQӴ+-L~Xk{u-qZxbC&UThM|C6Aߠяx?j}z'4m?,%H5/ UϿ(C!Ylg#?gm6w4}eꛥL.0խ(/W;t81Y־=ʁM62MS֭SLL,x>ٟ7tn?.4W]hYO[+vLl&rTӋ]*@ :T%~z-\EVMm^SX+2|S~?I'VWӷܠ C6TSSjytx?G/={o,L۱Q E3e[ͽj}}ZTP1=z@*ŋ tsڽ3Mܐ>4MnwYNNg1q(qLnY~c& [^-%-)U$g}>n>?cY>5=9F:u$-Ydt뭷SNQ Z5Q/:yɾ?ee*@%6vK aӧj׮]>IIIꫯ `ͽ[ԩg.K=X>/ׯ/I9111[ՠA~+PAS͚5+v]3f̐*/Z>>>>xKѣG5h ~;PA,X@ ,7x*BO@3rH(D MP.J7x#.r *oϞ=~'b  a($q+qTbIeP*;BOW>N} m˧(v:D!ŪSc=? P g:<ʉ ֵjբb:t0<,o' 8Γ_ּ{ByQ+UVqO N0ӴkR͟5I-t}˘ Pz ||u}LdN2\~FsL,Y/zqq띱Sr@E Tbbv)?8ԕw=٥o~v2"te:\GV~мԣGuEV }ڽ{%(`mUwݬq41!+egZǏ= ʨr^5.?]b(66Vs… ` >\[dc$*]~\q+4w6RAePQrJv˨Scf~aps@#F~^V P^7K'qoY-ZPVV~m}Zj/^zKϗa{vIH #WS$3!| =Pa*$_-zU>,oqݺʯQԯ }޼y7oڵkW:ڵ+\Bl6uI_%KhӦM-\;Jʑ㔷 Iá\ kD yt,~n階mty[G6WÔbX]U YޛZpwq͛gyFvS۵kWvwWyny睧,>|$iٲeOi[bZjkFsLO@d*m^y7i`Pp&IW'* #@AU\m6uodUDTo4oCA衺bgzvNOS=j5zxT?^E\<"~G;#4`ϕhe[GV|ɿ7(?Yh&ͥc鑁O䱆.oIo{npןPSU\`io_֠~`]fySfM^S:{YD񷏝G۾{aO['::PUXšZʦ{^%_kt[giJ;T%$ִ:T٠k#[:Us4QkʫM޺NfmTbaټE xjA_EGғE5*H(<0e47O> K*X3r*35MRhE9t(1E*6S5<ι~hRJ߳L+wTVhöm^΢OIX,x jc ];ιEU3'8tȑ(e\Ouâ,ѿ!HʱJ;״+99W!aI _^! 
iwTA jb׎CJj&ӾI}=93’nG޾9:r *LCt [yǟ~.e˖Kah…EW~}YV>|X =0p4eh{ֿ3^GMSs]KXCj)Txu_elI+=M١5-?QyNYM|j0yn[Ca+ɚy|Z͚2O} n?@CLzU[lju{p"<<ŶhѢsӽ{wIΝ;/ =Ϙ@]yף]viPAW.9L~6Y=1K 9N_~+F_T?+۽nz7˕J^փױ:j@*)ZyZU/ڥt3@OWif~bO]w:,a1dȐ%o/fSNGz4;%),,LSDD4c [V}j޼tR;'p◲aW]w>Vj?BûFh}@*Wr)UpeݵkƎ;jʙϐM3MOԣ7)TjiaS.x#ن>.*PfM)w>^|ǔ!@gg_wݎ/(IfhƵ)ؖcڴ)ӹMK~=C'2ٯ_?5iDԤIO|V]+THH4m4egg_xLHH$sFɑ$EDD(99Yyw}J,G}R'hъRPdS;mۖuԸqcI׻vKvڥZjQl@jڴio||| _\.=jҤF!I:pfϞ]}w]*Ȁ #1g \!5$*]qT}WJJJ[΢X8T$r8#C+oC9sv-өݻwkƌ BL3g{6QdI$,P'qzR5jNfΜyF{R=n/ҷ$MPFw ʆJ^^\.MvsʾbuPzRq8ͥ(=Qv&@ǵ.DJe钤prl׺ԩCVZP?Cu)'BO  /\P:u(00\~<= TӦM)R}zR5jNfΜI1PZNa &D'U~eJQ׺6oLqJRR֭[۷) =Q8斺_TTBp).ʲիs3zR2MBZ"D2}tIRxx8@Ƶ.d3BOefSuu)*QFtj̙׺fS  h+r}fzR˓(NifkT@ǵ.@EU~}y{{ri߾}r:3>BJnor_ܳ=y'*ӧK)ʵvi[k'Zu#jœ/ɱή]Nyvwbz@dS+;[jR+HVgXK6@O"͜UG6z+ϵ)çX-?ӄ%9ZZyo][G}K[iz1%&0@jͻs=?)n W{TC;S"h۸ 2uXGY?INт,|@][)?˴5͏ܭk)-~ٓ%5}tU!5uh"}uK^ԲA,)IZQ 'i[ yiȭ >К=s2vhYo} e0o6[Njh|dJY,RMlѦ~9m_}@X,5QٖVx[BI2ڲˡEt9,AU/_ui^yѿOOi]9Mϔ;eojw u}"j9kθi0ezܯ*m倎x|Ta ߣlO(#\ڷfɦ-NmV WXVڞdxZ+E֠۔.6.w*ÿ瞞(ʲg==]G{\nVWɐ[#:/ڜ`UX@y5V~;d6%g*aΖ\r󕟟/TzUHN㏉e\rW|k+6.( Btt4E@gda_C;P@`̜91Siy  ˦knSVGMlA'阻cڥ-{zյ6zhNe[Ey<*' oy\k+6.( BTTT?W|vE="{i꧃ =K_^ʶg5=uWau7r(ZL]%M/SxhkodH&բjjuBCTSIױy-vU?ƼֺFom9RP.xm) f VQȞ-ڕmfН5F] *SgjztK{LӁ+yҪM-VTPY.-wR-m[1\ns5iA#Duq6%2 {K#孬ױ̢,~U?r )m\P!+uRM߱k ePV$''jժʌɓ'+%%EժU?OլYSiii㏋C ɓuQtM2EE!PvMnE پ}{u޽u.]>#v}_a-7C UF -\Pi^6׬Y_|Q_|Q+WH BHH Ujǎ:x`oiyz/A3=u?Sw_Zxث֍웚T}s=ⴙxv3FܥW>xUwG\l3&;?Ӹ)+7@JQ#-SVhG缇|P!!!+99סCuUW.;wjҥK97  &Pe>f]bfjo}pNhlY?iK :HzEFGܒi:}f͚t-_ץKuY4f̘.KVkl{N>>>Xav/x)|Ql!H ,Mq*($X!LPZY:ZyKrb*i^tY5$y<͝;Wyyy罽z{{wuLtTgx (Tlǔ\]lA$#@ժ(s{ m'$j;pF:qs{Աc׋/־}.h֬Y#ͦ.]_<_f;c1crJ\* %"ߠ0$۾!ziatYu[ýZ1YuqjP-X5"T;(ܚ;M0Oy Q߫Rz=8!P(222aIҦM~+W,mjWXmma'P e'^^՜ p?VGvhIZlJکYgC}49kWo'6`*?qm<[&gQF HG%RVԣGeeeiΜ9JIIQllEd'T<@)DEEL 2' XSڸNNřZw>=eMϦ,aK ^m];CVUիrY>o x-/Ӿ}{Yׁ{vAcƌV0g̘1֭*0fz.:0VZ|{^ZԵkWIڵkd T`cǎjѢnӵm6mذoyիWbjj=ݪUjժK>iݧ>3:y'}x'r@F \#Pz(l*|9)I2 ۉ}*RM9/*Op)0'Ϩ*\eddP@g _s{[3=ٳպu217{yb'R*+gY GJ ::"ŗ?1 E @TTBPy{{+T͚5%IΝ;r(\!JW=zP.]xJn˵xb9NU>!Y2Ljepgz@%ҰaC˺뮻<%)00Pz+ R R^.]=HGP%!22RÇ222͛7+%%E-[cǎQ}YM8Q[lxOk 
iu@_ӕQo,MUV(7%0fq68BOZh#Fj*&&FfRnn:t^z4MmݺUsŋu]v1b&Lm۶QĿuګWSn@}]ղ V8դFp'TpuѰadZ5{l-X@ahȑ,\kUdd&LO>Dݻ 1c())b^ë5/㣶}W4PDMt,BmN[/ߝ{r%I^icԽe}h6l /~|꙯>nlR]Y:^gMT3ТsҰv z|^]CcqZ]r\1DWսooR5q]zeu i{+C-wumOb's♞P'VZ H:tH%''wѻᆱdEFF}yiժUO< z XV4߯hMU['G9GۆٖfTː{{IPSvᣩU?;Ȳ7{Cw6W.xj4Fjnoj:bTk5Ϫw`ئC&T0w]J=]ue 7jJl#^,2B:ktS]n\+Aa'T`=W}嗅O5k~WIFH^Z_aÆjР|AM8͌ԽU/LSYjjISoXei'~ .W=tVǪe4lZُۚK7UW~l^zgRMC!:UGg)W}Ң)c6SJNI~_P,xM\QGuӭ7)t+z54E_'?OG 2u[dfv3's" ˏLQQ_(;\}nգiWߏWz 2 fqXe5 W֞v+,4ªJ:|H鮂e6ooY S.SsPPj5<߳OIJ̗VSŔ)ɰ؎GGe yNLR1/߿p-[$I}QӦMլY3G[^ՠA%''kڴi#rP`P5\o_רW*Mӏ)ϴ wm2Kӷ;Ug4zT_oU{_+zJ#z^.k1rTEu]&?I^Mr1CWΑ{'dOM˴T(9H*,?^/:uꤣGjZz"##gypX]VtwSN㕕EA/k<fWV[2奪_H[L imZ{%MMU#Z/5h5~Jm=6=}qSݪ+-ySMpGy}T7Ny[%CkC [Fjܘ:VV1A1)գh}&t90*Ciĉrݺ{4`j„ OX}z私wrݚ8q(Rr7]}˺2l|FĤiYݦ~ܩڳc.~sot]ynj쯌C=!cxLHH$ST8999%''KÕpA|ttT%N7ũm۶n:5nXR] fu;VH 6LPLL6oެmlR;vTPP\.&NXx+܊D.Gv H==RSYCW_L7뽁4#%$$D*-[hܸq2dj֬;SwyY=r&M}QJ*nQvjRz:M-$Pz@%o>?ѭު.](00vݮ˗kr:17'J"Jto… uWf͚ÇkΝr\ PzMPa\ N@B BTTT@Ij坅*72 3=kwVuk lxEn>sJ&b$"cͪy̙SYca55Ό6].c0h3fIMPH.[`?{"¦LX~c>cËo{JOh'P4( M&zE=@DOh'P4(Zj5ChP'tAZ}6!1-P4( M&zE=@DOh'P4( M&zE=t]V3#zVP@DOh'P4( M&zE=@DOh'P4( M&zEk1ئV@DOhWVx1$q20I*VޔK.uhh'tG}N_f(BJnCzzkBف[WWSs't{n|M3z?@wCs֞=ҶqS @ó : G3;t77o-}Qj`DOhyK]SV; mY *}r 3^ȜuushpVzB2y/R:5>00 I4< ^Ϛy6u_1F'zBzmԓ7nȦ֞i=$I=oJ=RIt߰!}'@ó$ztQ~JKf v[ߞ/̇>9&Y?73[-DOP_9ӿ9Ӎ$no M&zE=@ZvV@= ꋶ m@DOh'P4( M&zE=@DOh'P4( MbuZ {Zv>@E=@DOh'P4( M&zE=@DOh'P4( `j!4(Zh ( M&zE=@DOh'P4( M&zE=@ZV@=aT(@cp{[h'P4( M&zE=@DOh'P4( M&zE=^V3%zBTmBE=@DOh'P4( M&zE=@DOh'P4( j!4@Z|,4&zE=@DOh'P4( M&zE=@DOh'P#ݫjРDOjmB(@cp{[h'P4( M&zE=@DOh'P4( M&zE=t]V3#zVP@DOh'P4( M&zE=@DOh'P4( M&zEk1ئV@DOhWVx1-lwg8ěVzB#2A*i~8Ja1th[rϟV( 6P( MbzZ`Ű(С'GO0qYW7g'k:."Ss3yܐ4 @=ai[0?k7?mC_ht'٫G6nJ=I}ljVLOHԳat#$imM Gz4<$ztQO~HFH%dѳV@=ݖŷg OI͌dT(@c=C}]LrL7 4P2+=COta&+s3n. 
JOhY 횇䁿Hm\L7$DOh+?bXϚz=kOץdH IDAT zꑶROR߸!Z{Uhx>$lX){H%IZ[}ÆllHZfD J$I=]F+#c-Y=ꋾBh '۲L_~a>1qݒl1'zB̙̙n%i2d'P4( M&zE=@DOh'P4( MbuZ {Zv>@ QI/^oooo7?Ieʔ)%K$Ize"u֯_$2dHVX$9dɒ隯j/X `5jn>|xm޵J7X[`/Tӻw$P%=@DOh'P4( M&zE=@DOh-FW AjEۄP@DOh'P4( M&zE=@DOh'P4( M&zEk1Zf F=PV; m@DOh'P4( M&zE=@DOh'P4( Mb{Z ]PV_Mh no M&zE=@DOh'P4( M&zE=@DOh-F]W jc1-P4( M&zE=@DOh'P4( M&zE=@ZvV@= ꋶ m@DOh'P4( M&zE=@DOh'P4~KjcgE-F2%zBn{ zd..z<8ItR= z;5j(C^U[=: {Jv&Lַ5 .͛sQGӳg<9lٲnhFzMgРAO:͟??Ir}un{$-̰xux,z@5lsw鸉'C50^6ۯLAu~'{';wn֮]k`l_幫mΉqk׮;;`'t <.7v 0xYjE՞5'Oڥf̘'x*V{@׈nذa]:vOv]:%MM:nvʳ՞{ 7o޶7ESS*J*NCٳO=Yi' N8$g;,vXw%I;tIIs=740~k;[ndɒ%/g'Z6Om%URT2{$ɢE:W}{Yj^=ٳggԨQ. Uo3{Wn>5kV/^ܹoѣGgȐ!_tR ~ӟ&I.\v_*ؕU;{^?Gy`'Lnn!˖-sРA4h ;!zf͚[}ݲe˺J൦Y 4gy&K.-<?Irʆ} xf9 4v!.#9RٰaCzR5qNi5Zz*Jz}/ׯ7--3I9,]9R---ӧA3=@DOh'P#x^a~HgAN>T*\p6mZt׳>;SN… ػU˛~OOdk/#O>9WܙZN'ϐ3%_Ruǟ7|乽r'I>O|" =s:u^J%w^VX:+_裏fEܕo)>}׵{w<ϖWnOO[[[|<#9ӫWL4)gα[o5O=TCѺu?gǯ\rI.†:*K[[[/_ d3+<27w;Χss=fKDnCN{>. t; 9~yiyx v?Ors;ϗ~{YfMF뮻.ԧrWd޼yӀ&Lnݺu='t=իs+Ȕ)S3dʔ)6mZ>яfڴi:uj\2=Ы|W\qElF퍢%'|rF[n]n<{5㰓rgǭW-92KMoIiߞ;i|wL>v;x3W}vFn][R3"wNC{YtӏziL9G_~w6va_Hv7I킏/O]w'ܚ/'?r'Z'o7}z4'[ڲay|Sm.=~y'ﯦ喹Sє>ɧ5 +W]}[}w;sd!O{g绽ECs?\il!:Lܒ^;wzxl{G2qy?q_c&IQsgvZkݺuUv,O?#GkM\}G>}cŞ=ܓn-Ire.֭[3f̘viE^Yfeڵ0aN뮻y7.ɶ&JyMGSO=5ӦM>q2eJofꪫ2yL6-^xan;w߫|;>̛7/sw<,X 3UyRɹ瞛>:[nC=e˖5Gyd=м,juXcr`yMi~KzLU猉϶H6={Ņ3:sn[q\i֭嶯~;?[-<ԟ_`'ϭ頜|ч3v7˩|N.~_[>wnoN\uU5kN=zt>'I&NŁ'_ f/e팚a_{?il>8?hr^R{2c-zƽo\ޟ ɾĹ7wsُs[=>{c.9;̓YWm?d>O7fʏsK^":tS[5ė.ؐ+<|%}훖9q)[|v{I IҷO%yaAD>5jtAnRv`߶cx[K:6I&Qh lwAaѝs9׿O<1oV93&Νwޙ$:theȐ!{]wyI:+5=gΜ/3 ~3s[>C8p`)]tQo砃SO=<ߎyv.L0Eüy77 s9裳~|[;};vlN=L81˖-ի _li96>wyln-ސ%k:06Z^%zJ,tY=ߜm;$eefˮ߶^w3~Aٷǖ<_)?՚ ݤ2}#s6Ibzlj>~Â6k 'No3~JӋrЏ>lmٔmYr'g_P'-iijNnIcߝ#W޶b8o~E$?'stXДG'(xT-[~̛5=myDZ?uxsSޑׯ*+{NMvk6w}=l͊ca.o? lرEE+26lxM6ǏO[[[.\zYhQw#&Lȧ?Yf6,C \?|+_%\:'xb,Y{>z=J% $uou9ǏO>_ Vzo|93sws?uf׿=_9eʔp I[o5gu֋;g#lnnΉ'$~M?q<s19r뭷xem;ɧfSoyC޻#:DTz_ߖlZ"X*wNUw ?]}lM~e/s왞٘۴|YVvpS*z^ټ>wTk\s;9;5|O\|FYk~o:G~u덹vѺTs'gH>|3`7.^Ժ-C'~4>͸>ӟзIH}鋹yIXCOoX'>.N3Gsq. 
߯ܒ}81KD4l]vG/>yis]}oʝEagc?}Gxfؐ~,Y4eS'&o{ywqU_(ʢffY8դ6)>\B5Mi..KJ6n" /.p&A|ϡ~ U0ގ!6gPbRɴB!z8+. EnnsNNfhL//Xyq S-[YfYqxzz˺u䆮e]ƒ%K7o4lؐm۶m{RSSqttdΝ;wδk׮L0xyyaeeEpp0r ,--QÇf~~%8p ޥ8tP-Xz?%+= dƌ|ڴi0Ukw曬Yݻw:lY^^^J}}}8_ڵ+:u2P*)oI1 ??Ke1> aGH(cXZfܥLцa綟O3>Թ׉ǀ{npprZBJ\x==gs4b(ʋޮ$/fW'`_3~*ddƩ$%Ф1(؄k\ȪÊٳg_jۃRRҹB!?qFȓHySjٖqwMWsD/J./f,b5;SDi2- 8u͟w~/ Y`ywlN#*2 oOkq1D--ӉA4oEӨ#2*4/=5%Վ fnX]JFk 4Q=# '8yW8u/'Tz(B>ciQB/vr恭@5`05uB!B#77ׯ [[[&MĚ5k|2salٲKjVmJNNߟ'*>4h`߼ys:tɓټy3香뒏?,N3Eƍ6l)(~qFlSTT7o&77Ç3{l&L޽Tg]гgOƍW.éjj\hhh < 0Y"ŕ>>>VдGfH̽䈣[OdLzQ%=l*t kGzd&8I](,z,VQj2N8;*UAcehIDAT{hLuEg?'4Oi/C1Q4IKC5]KExB1ki9϶cW+vo#Sm)t35Ao}?p鍷.^hPUEUAcaU UcF4hLߠ<=  T,~E 6v(ٞNj64oؠL&dѐF%ല-dednbM}G%Уh?KZxUebO^>PrUÎ+(P@B!BbY=hҤ &MbݺuDDD;w.lٲO?Zٴ֭[mXd }666t???6oLff&NPZdgM/0n޼ɗ_~I߾}yꩧx"'Oɉ)SQK.uVmorr2Zݺsy+ªUL՞%}wؾ};)))fwCK˚Y bȑ厯űcL}Wr1۲\]]@ô΄* <sg:9fq%Ly駿&dZxIiJgp-wyq$F}Ӝ>'0#VMcú+W?Li[8=?q=g2N1/xݮ\ZBT8 {Yyc VaiҚKRh_X+b掫E`Я\QACWbg.Boڶo&&jO'~[$[~|qtE`LNBӳ J*h[q$xӶ[w~8`H-#&# 7Ŋ$tyJTciF6q LωJ/yILQx6GJ\H֕05ގ&&kOoex @imq_cl>6 9˃DM\37@=%ƃ w򅴔T!B!n-CSJmAV4mmխ[7LիWKp3g˗/g֭,]YflVk޼y,]4]={2e 7oFXo$hQc餤JHH?6m/^M%($$TVRӻ1cpyܞ?1cƘ֢ iժ Tcaegv޽;۷o*>kb 22n>ŕ&CTӢj(PhsQDtۤ$Ulukx,rQgM+|<~+hl׶y~34W5џV xeI᯿`o%'|e>h(K_y{֦Ϯ/p߿N q'Xh{ﳰoI';i"и9ʦ6—7W0>YQWyjl \h۶-5nAǾgȩzÛ~{["E880f S:b}?\(1;ylx}7(:F7I*MMȁ#I|0l*r*ܞPW-^1 ZF O%F*n 8Cf*AJa64wO\MָY<_n_˫s :7azholB^aGp^M3r/#ʼn27RzP<ݮŹxqݰC~[)"l<Gҹ!B!B!)mh46mڔ?O_ ߴ>… yw%$$0w\,Y–-[2e Vvuޞ}*vh777;wYoԨQaggɓ{={1kѣGM4Y#=[nM֭>|Y\6m0|6nHff&7n(w7QQQ'ع .,x+3JOƎK-HHH`׮][XX0tP:t^unG+klm_no N.Q2Α\\Lki ׻CI4VXYdfQS'9!KA+/Vu 0&sst=lzUDvC}!mq%/;_q"R9c!Y$ŝ/hdFnk#&6hɸElnG~QϿƴe܎\,$s~l0낞ѓCضdxA#ek@J*j7_01CdFCL@l|zW9MU(gA|fUbdĨx{-JAkVfIxjc |I ]7;mf5PI=;׫( @Q(QYDoYmFųֹhÃfqnM7uSko@q>]=[F?!B!)j߿ggg b̅/k׮%66{ٳYxV'h"h4ԉ1YNJ`` +V`޼yllV.]pvvחכEۺvJΝ||xx8.]2Z.,gMuSNѻwoN>M^jbÆ i&zMӰN8|||Xj5v7O?MTTP:,_20icǎOOOƎڵkiܸ1^^^o{{{ $/ gÇl cP㺈ގv%ptgw*hPOCϊ΍B.>mϹJ#'&>;[$qg9it~*q/%%V8e'yqRx`LJw\#8}g* +ٹX9ĺLU;sڋؕkΒS1+ʾ .Oy=_4$~Spfw"W602y7HtBB!B!46sR>bUddd0}y… ܹ3? 
Z8Xzuk2a„Z`>P^'((lc^^^Eaaa^+CONGtt4k׮Ϗ@ra~7\t(2vXJ-ZTqDj4\6w-=pM ؼz\.x\SCƕno&aƯ{1hEձ_-q{a,}-ΰVB!B9::J'QQQb9j(ϟIOO$&&LJիdž ̢]vvvU>ޜ;fqNV2SN5Mrr2Z3f  ‚ (Fϊ׆2RJHH --ݻw$| "1igJYaٝA=]iܰ>tnF.0{>&ZX fzM<B!Bnt:5kFll,2kgRgڣjykճGmB!B!ģ+|qyJ3sj:d w%~~~ܹ=zn:t:*~mڶmȑ#:>77`Ν;CBO!yгI)B!Bnz !!B!B!B!9S!B!B!Ba$Bjh:V16 !B!B!0oBڬxa丸8o/l}n.mB!B!Ba$Bzu.8kڴB!B!BH)ztB!B!B!82B!B!D]!u !? h5IENDB`hypothesis-hypothesis-python-4.36.2/brand/hypothesis.gpl000066400000000000000000000017571354103617500235310ustar00rootroot00000000000000GIMP Palette Name: Hypothesis Palette Columns: 1 # generated by @kathyreid # from the Dragonfly logo created by @libbyberrie # the purpose of this file is to make it easy to import colours into # Inkscape, GIMP, Scribus etc # Colours deliberately prefixed with `Hypothesis.xxx` to appear grouped in apps 0 0 0 Black (#000000) 255 255 255 White (#FFFFFF) 165 100 250 Hypothesis.Violet (#A564FA) 98 183 255 Hypothesis.DuckEggBlue (#62B7FF) 136 255 127 Hypothesis.AppleGreen (#88FF7F) 255 252 54 Hypothesis.WarmYellow (#FFFC36) 255 202 120 Hypothesis.Apricot (#FFCA78) 255 128 128 Hypothesis.LightCoral (#FF8080) 163 255 247 Hypothesis.Turquoise (#A3FFF7) 163 233 255 Hypothesis.SkyBlue (#A3E9FF) 128 211 231 Hypothesis.DarkSky (#80D3E7) 101 195 213 Hypothesis.MediumTeal (#65C3D5) 92 201 236 Hypothesis.OceanBlue (#5CC9EC) 39 135 178 Hypothesis.DarkTeal (#2787B2) 87 83 139 Hypothesis.OceanPurple (#57538B) 68 66 95 Hypothesis.DarkOceanPurple (#44425F) hypothesis-hypothesis-python-4.36.2/brand/hypothesis.sketch000066400000000000000000003740751354103617500242360ustar00rootroot00000000000000PKĠ document.jsonUj@R.꼥!Q%#ɷub03s̜=+YdDltUKPOY{b"婎8XJ Q`BcS-$Y Zlrj9,UQWX4˺ԝW5)'#iqdDP-Y4U}+Rh,i|HhmwQW(MIX˅ 3Zj8R}0X LRS&%pq#/pYsC*ͳ'Wt=&hWy#+ݸj>a<6K^蕖 L2jO5F4:rݮܰźW8':̃`ʌ3T8(ka1>h&`=݃|XHm[+ ,(ZF)H)y֟un_;OV ?4 J+{9n\nk&܁v9ح{nzriH$uEzwϴÌ;GQ0jyد/߲t?ݣCP<4+^.7m>-]/>ݯϱ:2 F"LC)i&̶@=4`%!T q(ue1IiyŒ J(&,c[ BGiPK.j/pages/B526A7A8-90A3-4B4D-8209-B5381656AF0C.json\koǒ+?W@7f?, Zd,eR Nf(ڊ 
NMwu=?G>>ףFgh͎7/k\,="d[8#&ͦ.Qhm,gd3_.F٣r^ |;l /G|:08[n귝Dc}ݜl?^C|?̦?gs:?;M_,{?kw~XM\y9E6ٶ?כdx6뚴9 F}vVmuu48;arzݏD7Oa\LV;m<#67~|1EKuR :>'ۓkP;fH{춢 eLT (:O[zyzl{x;ڍg yu|2{z Q~^تӷt]2]aS\Zg}}v.YLfnyMm\Mkĺ5%qmBd΍}"vd%IJ3fi]&gsJi#,ё:q B)lfɋjy1$7mGڹf1A.w<]I L\8FoWbmϟ{r=ufs/ZT>i\Vmdojr`I$sC3#gJrC`cG^qri|u3[@{eL6?voV5_'PފSΩ 9*l|yg2Tl WA),usr}lwk2|՛޸af?nV٪tmWK O;1d:?zaزK1D4>re/EwQć H6ck3ɧrԽ.w ^z@21"y<ӧѧao(Td1buxQi>q{rK~FЮ 8⯻'ݕHX_CΞ%xFkS AFR9gIm`0ExHsw I0w$>X"8Ե/qrGSHu( dxXNi^'3H4so%XP98q]]b s0lhM&5 Iww}g[7$ٶ5l4 MQ)UXjIHj)bXIAr{]-_ę.:3 xfg 5raAnTm29W`hOD~` ќ93OY?FBnq?ӅED#(Xbߏ~fRKPtB`'Q$^10]`.@#K2q[&u,0 ע×b2P!"f,xEJqDyc&oLѲn\^7"y@,$l)gghzHa^%bBy|Ml7р؍`殙@1a4؃[4 @`^rfXp =Qcvhe[&K" V9w7ӗ˓e$ n7 CiZRJ(r%ӢbeYbO@5Ǚ{_/WSZ\-V p".\V$mm o (əж6)m;️kM1tp`)%M*ˁkjx&S~ h2anHȞҙX7{;S&j57ʹMYr~IU4Sǫ\4"Ҳ4dpkMYՕ"A;tiHcu""K)KNcCT9fYew Z<֧鰠])]ԙ)x D:I@Φw5N 2#!t݅O!5z.FauEAGF7.Ȱ;D Vijx-`Eo˖9O &C ]֕NLħdS Æ/Dy!3K4d\$ u&,NQ@K"=ho3l類eiX%tZ"#kEvM;{kZV,USVZIM$ ,6P#tI_ [R?/ _ʢ,M5BZ`Q%i 5ZWœʧElcda "@6]نj@$SEnF&=ع0C9ဈ{G<3&5k) #!5mzb<(U`Xf=[tk S8B+sx DQoSoV`u1qD,ǨZ R{4Pv&ˀziTz9ޡeSmRږDi(6!1m Z+%>M ) lW0 cX3`XJ^(rW@c+aFDtrG7%R0xO}Il2 Zqi!=9܃quNt@v"!uW.MRrD)> 'ȰHxk 8 ΁ECg!0n2x\&0K]*he0NX2,KP_%oQS5)*NUyn,l2_tɬ ;(.~qS#"vinl#p}!tqе+MvzDekCJy05Oi2P*;k8M(T! BO( kˆ[.話a>|f(ܓ9p}دÂ`]yt#݋}1!e=;Xvc |]g@v-Ѿt:x8*f7lA.ru=L=Š _rMcSا}9=|:(b==Hp1 {}RەӳnIW:1 =tPswnf9g-q!g9;XW[V""@mH%&TIbSPHNQm\!jYOFLQ4XRU z8zD ɱ2181”M i.bNc~` A@IưMd[8ṔADZ-s*ij.) 
JlY9*ig̞gOV0"T&X3܏=\֌8̈?U0H)ƗmζhBVqku rP iZ.ڎ롡R>*U4Ӎ~l#N1,,َ=o@p0;Oz6I-^q6?.dK;PHZCHnخQq`tz HZ)RݛzBu.zJ=Xib/=IK` CA HZzlzN- }nE;Hv3' l6`u>m;X 2"kJ z!1yk),[uu[]H"ؚS h"_7Lʀ)rQC5mpW3RSitM!\~i%2p4uiM5k4[KL#cbNnX1\ȒQ |Ww㊞/{leHWI8vFU[86bٚTݦgZL=5e0Zum74Le[!.-Zk[ם&McEӔ:tU1ؐ4ߑl>_L-Iw[k{Wل:; Z.Cbv=p >?%Sm;ҳ bg} wzbZ hh ŶP)ǻo^Փz]>=OW> ō\4tM,^._Χ}v\I:&y2'_2/;{?PKLZ/pages/F0CBDCFA-B82F-4FD4-93DE-46AAFB2E994A.jsonMo@Jg["P l"%(Ğzٵv;yc?k9 ra (^&q44OU8-rϧ Ns<-Q T럫F`0r35?~tjrgk٩9DpZ: K ,pg8[ bMcRi(‹h2b쑤/OfKrkvdžsDQUj,A!fd%OŽ7m]r~u@D %ke,󺭯YtN:os]aک.-B NΟpk l7KD ؃s$-ouC}bEY-b04{&8c $^uL-Wʀh<_PKG/pages/EDC6739F-F030-4250-93C1-8E3B80936ADF.json}ks_q#$cEL=6lZM*$힉{P|S*-ќke5[ ̓<_w?ݫߟy;yNr(mF|Bԍ3>}٫߾:=/﫯's>'o_yۓ|ɇo]^޽_>Wpɍ׼>}8>=v߾>]N~a_qӏO?NOk\~?p_g8'|/'o?N?~O'#OaEzKg1_;e^އuzp~~y矮^][__K~~x_?U<Ļ?t-ī:,wo>7Vp5wWmz1HPحz)TJPїeRJƛZq|xW0v;v8~OkXh<8HgS$N)Ji_ڣl%z|r%_xT|Ԙ(6|r)8>&Ҭ?>yoũ;=;y?;&~O8n7~Wp݀k7x閾prrvox{w|{w]޺k=[$]I6'?}rT.&,9ppo-\{x'glα^=,6q ~c^:WW_r?@WYwm΋`Y-eHQ4k>ō< j_zglEJh}WǓ-Q\.sO$gu[X~w'o9Y1s-٠o01'g ]p^i4}J6 x/+x-GR0.c JEjqG(!NC{`y_)^SXǫ?}O~8O>ܲ7?}ɿpv}~|sxG>N5HSژ#|㉱;ۓK|˅n°ˏ= nx/G]r9M}]}t abȭP)+^$"khX|N o`o%$uGc@aYIV+t3y/m߲5ʋ+.F%Rz&b |riagN,]m.1KX=[`CƦʫ`i3<[î`غB)!le,yH.%$_ERx [^z4t'Ӎg|;7$r؉%I],)>c'EbC\)y^d~D5<%#lH7P3v.@ f-/?od9!0X2L}2޾W"Ԉ5fJB)oo28 ܞD'2$WBl\>訂l` @z폂,`]F DX#8׬3El4_~{HĺZOv@>sjjX9) ޜ8$v%>9dʤ.G[<"M-Q zw=QNdɨ^_jE\~Dr|QWL齘On%"h:ӏRgkb콯ynu.~>d"< ÕU!,| V{$!"/ V?Kcc棄F璕o??] 5h v+D-!m,0$mPe`}pߐlva+v돹L]͌j0(fRĥFB'S wDb=a s6,ڄEkX&ҷ.uHեD|IlH]69B+nçsR_|(ʬM[K|]ےZ~7 lrdo,uMۋ/v-aǻeIMfds L7v~4Z/ "&g"W6DXi0R{fR 7a .cgá;E4_l3@9=CߌMu{s}?dS/5elÛye@nHOqIpuݬy CrNu5טSTk< YAH\Z5 2s0qJ]^f0~. | OЋo^ ,@"r2n뵵{?vϥv$XW-|,tX4Ͳ,:Ar]c$Y1k,;7 *t QYd^ڒo6 䵰|q.M ~ ρs"U"ٱ!d`HIN(*Yt)<Gvkqtd1W, K19ͻ+͉fϱv0kpq$FÞ;,_8\Y,u!܀&ӂt^z0UyBAe c*==ږ;``k;xDX2+m X+fy|hLnZ=f<wBwo=2ҳ>$K߾+̲O7˟>gӷ'52Ϧnpz}D秷g~wߟ3 q|D2".?=F^w|qp ϣuh$t=4c9.0'vO| pn3DGh:LF ~h?vûN/e$K큍9J7+&NF7>j*c.(>\ȸV𸵎拓TTH/ $Y"lHg.8VI\b 4w6,heګ{VA4D/cWZ P#Ypf./`~LA`7^Rt\VGoӶow ! 
o6xZY xhpr"J$@nǕBk2@6(5S|$0jCY͎ꡤbK#gXmliR{FhuW{DYkS 2;8W'~mMNv$Fs}?twߵZ+G}JLh$ Nf;S|] M?vA2~+EF-&[f)$<ûD&GIWD&.|x|-/^&:w`{`'[4m<4x4z=cv#}b;@GZUL&e'a3ּlޘH%Gp.-Ld8R;.qHn9aRH+ϨD93PyF?f;YddFhP%O qknBa&M72b0yn,CgVt$)Bfkd 8 [#drG0m9XWJ?erx@uR *ɷ*4zO J}%'JrF'Fٰlk̹cDH@9{ZssTx_uX8-鰑 = K2 >qð 4~R-@167& @: .*v_&m}q6yOi16X`4ӷ!W %}\E/"o&2 HZ'uq(K#OX'[ORjBуapo-OVd VAJaqEZY8?VV5\16?Oc e_B?iI.M:3^ڐV#jʋάoZ^,wYԘd"W%DDwk*W!dAWsIXI$WYS ǧ'f߀m!mАO#((ǻx UpCeciFj֖!)$>HJl$sRs3ZvIt&S8ݞ!+&:!ZϰPֱdd_94OD8X=U7LU=5Y ޘg+xS c#+uo``?EX"EK>V)XqŅm%SC  G%V݅+TTC)V#uӧu̶r"nJq vge.!SBT#aQvY(i8$g9S|oc9U KJŘ9qY@VKܿ;ܺ<l$`e)r´B*[fc.V )T^G:v|E5`LV),rxp["XzyXKj7cN {UKJ;tOy~ǡP8 d'u$Jlm+ll$HXxY@Z`ATukNe/5n{/fm޵˻L}+1G4ڈ[B: 7n{"ĠY.BJ _^QL*EA;I~ex<^4 6- ,֦9#刵$䚴 gb ~Mxc/i"Z%6-YrRm$ٕc#b_vPhT% ȈSn\-UO._SV09̹f!t0*y~i\${j6{G"WE%7#aW[Q6YEZ |Wӝ9.A!^8OJ,>S ~).d9DRW%Kƒh݊y0e#qzYRYm<北_eRXcT/~S G!\X=p ^Mi b́Ốs WgfFO &WG.ܿFI '/]ZB@<C#n1ǥDreL+F$K6%t6p,jnig I!hEReVqE9P}XPvc^.fU Ɋv)X{2De)P(^YOERL& ,te_)bp4"-ѩS8+1AlaBJDHMI6VbkkUbJ w̮J ;h֓mG%\J]Z 1"`SnF_YHkk|\MXI 6Vh$UA ,gw|}8 FCeuD!Z5@ +l3  -<;VG k'G*XVvzf<.Ph"K]8V2pB ⪔4 D8tsMca9e5z(g0a6.$ ooϚq`2#.S';Qi'FOڪL_lgc)J"IKv=;7ݙ4x5ҧdt}'D}<8X€{5Se^6=ti4,Aeplظ1pL$e zSY 3䵙!"g ;SlB֊648O({"]@Z'h\|r&M"\2,G_@ZT 9{Zh>X6iQ=n_Y8?kB6"jK#MqXS+&͋:^[h(63fXQ@Xq&2~װv"+>΋y{7ұƦpycͩp*,qe`ַ7=$Xzqu|q`jDpnl"ݴpQ,MQK{XN4BPMLj7 dF _+_Q4,69`bYo\J8`>AұŒ|61$ sJ xƂˁ/"viὴ)\!(Y](iĂtU.w Hh< Ŭg,h%7FHÒ'9GY!d[1÷G@օ4V72"}Gb ,Ru0 B僵+RVK- XIN EīDRX\ى)Wy(CnA o_x]bVՂ3z}ߝ?='{>b.5ٛߝ[g9< H~2` 4Ͼ?y{ӓYEWkT z؆36o\?hpڽ8+xR}Ѱ nIVdƯek: n<:ljUfv~hBϪ7uYDrEVܑs,,oTrsf-&DR5rG.W@93M84t}Tq*FG9cKc7<'7{`$^8+Y$IƺMgri#5:(my. i |G]d>P32gmtv\)ٲ[PBvN(oq#߳ yW B bߘp+鸒%5'E&N(K)^s(X#G#B6Jai5U}jxHإ'KHSiHxV9,,O~jܥPt 4|dŰMzQd~Qem5i3? ;K(XgE.8 V ;·5hgBZ&(ĺ )32mVy"0S6*ź*v dbBY+A-,,PINY$/73Nml#BMVd9~BedTppW+a ks-Vs ~2zTZ?4Vp[ިjf) &X)|:E,:бo QkGHZ Bbn auE|MKS 8ƥ2wl0^oMI?aYr`¿ Z%LR9Ǜ51nbkV#_8 KMh*c )'zd+*TT$-adZzUz;p[0;?&`+t?J#R^j^R&BD)Z`;ZZ $̂:a3-KqNRi8rIsg 9KQyR8V@p.f#/l-WIC#9Xf(rHjs294#%'%F\AYn*qrEc[F@YR@c=Q-K hmńn;VH΀lkY[;I.SɁPccldzr 8xC*ri,k.8v$lH_k. 
,XJHVBd!Ar1--TU&-_ Z X\QQO?Uͣ9̓IJ ʵAɘ7tu0qpGxID6$qZBu>C#=_BifeQ. SL*~6X]/ȫ{yi}WGZ F" :iuH8#Sp9"~i c _ U*(6O9٦9d̚l- :Ri #j ">i8GfTdCY!pe'J %U m/sρU8M8uS/Pk#R~c90u UG6 jDR5*9ǚPi'rU]0^T>𰎝vT p̷M B%;VPֱa锸<Ӭh|rH)_tzB FNd9ԣ"{:R]ldO.^2U  19Z wLIHRit.53HgYcQEܲ_/o|L%ߙtaJ;*IcھṢ] XȎɞT#=Rm^jxy?wu Ypj7ƳõK ΣWQj50#D!Ddn)C6;1dgZ 2qB.Uh6hsI95TRy&O DBKʡ(AH/9}\̎/ ?#箘{weK;yL<o4mmAg G4.釉vL-q UL<ȺW2 }t6}H3Բaa\X28ۥ"sDI#g dUZ"-r/S%6lȯzk y(ٿ# y<"[5Rn'!{?mK̬ \P)X.m dc(S@|Xd3wcBhCz?J:Ыo_G}=T$N:ƕcv1dOf־c;bǑUFǓvE`ڎ<W AƼ&dn ծԐFJ,ʐxAp&F N&[ĭI}Y,LN3G~HCP['9e H6{ZZB,(z b\H>ۄziT*uzGd^Z឴ny b'\<"VMUXKZfg1{$ؤXUh[\˦Ƥ&;c֩!_H `w ;|cN"TC!U8"lf@"M@vv"MFR"q(dvE1-5X[FHvM\~Вȸ^◉Ry"28sZK gy޵9XJEsZ0˼7-Ml\t{j*<>A圾O[.hx`$ۚR1ϮErX, '[ (v$;η 8 {Ph ">ImHGqVȐϤ'/l˒OFl t:,;(>RjBBk$]qm}jN'YQ$g ecd!BR<6 "BWAMR?8@?#ő>2|bS#7$Z/sU+ eM$Q$OU #oZC:Fl;('U֒/~y Բklw=J?Dk6 vBcdIn#qQNL 0CEWO:M7jWcO` /-/*Φqza*d'ʖLfqƩ؍]( K@퐿*\`Sʤ~Y%6F5MJ<8MrrS4 a(㱡ȬEjE3XvI[Fgňc Y#]Ld}\^^1"I52'BTH$NcmS9_Ɔ\xbnZC5CMIjOro<ٮDrıGmʁˋzFCb{OXe>~lIU!q>SPUrfYNPr#FHY_fUD8y *kw9[Q cnE(zSUq |]'s*o5P& .D BDT!":`k[eU%W V e "X㸋l;Ϥ+$ {@>O ۦ[&*I[pߐb+hq; xd c Ty`oV^=Z̈.NbVzZN GtE^*ʳ";wWjU.<ӄ}f;̶},oby6VL*RBD&]J*|-Nw q[TqA}ɵ?ylWX~W+b_&>-9˶Z'.-K(;*ʑD|=ÃёIkCNo^`?n]zno $ %y+O_ ҋJ8 ?K=wG0yi-V !'#c{ѡOM]1X{̦t1Jw:7M=*\,xj!`,QIZktpàb4&Q;cC8b|n9c(ٓ; 9vxyz{֟Y }Y*;. 
dfv]"SO48cݗaӫ#Σ7h <JNp&/%v:ZZ[L?i>ư1ILQ-@w{<ugXw&#eEN#R;LM=LVW /groIoW84?pH Œ/S˔ ln Mn&~[2IYX߱L[dž08JgYO K7*RY.*?=HKy ^Mojo>w~| s#`ϧZZ~}=:>{sw N]~n}0y팽[u8>[OO”=YN[T+&=&:3 fkz/L8w g@z"8.4tk]Cmz zW{$vm}Fl)FBMQ̜(}Sşz1C(ʣy NwhA䙱(}2i~?.9Bb Hiج}C/ab2ޚ.s2Bs~C,i_h(ӜMfX(a#,ġ/a:[ *`H*a;ml}1G #I~σ5@)SSJ?ٗUhH[.iy8"oT ԒӈxnK:$r`πGic@lLTT]ǎpj$Yj3%0F&U,ApAhHk_o`DLlzdGF"{ĥ @cFSjY{Ƈ#8G :rK2dguٿҸڡ=4tܣkRň\ G2e?j\Vv|m8>gִfCӵ{Vim6Pȑ[\e_G HmRZoc_n+/c-ccR4c֔jV9(X$yqF S*QJk3NұYnIN&|{{#FCSwcsQRBjdsn$%J\E"6JƤ@OI34aBYW'A,óV$]&[ĔWۥ+Op;m(Ƽi<{>9N9HeyBv$Gl̔f6K5HBZ?PQT$pA4gĚ7S蜑A8) /<tm߻x]CPKZ` user.jsonmn0 E%lH=M,t(Э{$.X+m63 ACP 9O\?.ṽ-m*FuncojEәy>p4ySQd|x*:]o/tj"laN l, k?:N}UzF5bK_hk4 9¿֓YB$KhQ!;OMIkT|+0r/H* PKG] meta.jsonRMk0+CAߖrdiݥ KVZ[(%}-^YϷWRc; a1)QuU]Ȝlmc ԇ␥ YSuG5n}54#ټv3` ,pj+aV%f\W3mQCj)XH7gbL"i*B-9+ Ɂ7PΔ,[\3jp4b0/q  o*Jme5_SPߒ8׼jǩ^pg rPKSڧvpreviews/preview.pngPNG  IHDR#xsRGB@IDATxydUi/g֞BBIHH! ts`t陞gΙszY9󇀑 AB ZJڪT%=2=b^/pg=k~ܖ׷D@D@D@D@D@D@DNu\D@D@D@D@D@D@>" " " " " " {" A'l$" " " " " " AπHP 2HP3 " " " " " "'{¦L" " " " " "  쉀垰)>" " " " " " {" A'l$" " " " " " AπHP 2HP3 " " " " " "'{¦L" " " " " "  쉀垰)>" " " " " " {" A'l$" "ЀNo/" " -%0U4@)gK0NIP__w`0b " " "PuZD@D@:@ >V7UbAaMdt(" " ;Аם(UD@D\)I6o ^F~a4eS0VV k>up>lD@D@D@j' AY;+Yt(djr9TJ[D@2VD@Dk PLrXnbD@m$(k$+%!ySqm-" " M% AT*LD@Dn!&YU6*OD@D@ڑe;ID@D!;9q8^ʆ' AX-%U[ B$(wT%Ex;RJIPvWE@D@I@U@LC ڴsD@D@:e;\D@D`yn";V>" " ]G`6puM\J$(Q@*R9 {mG9^{n2I\= " " "Peu6J@|dcdE}$N;," "  D@D@:SE@D@D$(I[eFs [gc5 @slCIm+JD@D@@@R's ^5e s3(_틀@3Y w-ZR4J@r+ňt{'VMN% A٩gN0e7aV J}D@D@D`+ ʭL#" "Af()|FyHD@Dpd3KYְfV" " D@N*" " wmj " " " 0hk"8 J}D@D@:@&@L$' AO@G(c|rvE(eA`VD@DX-vKZ+" " E@N*+" "4{k*wY] " " "$(!8||Qn&c^& Ag_m%@1ٜOn$AbD@D@ze{\D@:@2Pjk~tTql0P(@+I\i격lmE@D@DHPQ}D@D@v$|dH+h't6T] ̵c;IPY] <6+{f#uߖ+n J/#b/0?U#" "Q[;IXmP+gg?\MGRE@D@z@K*.ėʥdkr8 lD@D@D(ar/|n6|b6t##@Z"(C0.{/(Z_8 Qyu_e"] s89%=Q+BiCW~{l" " N%…: h [pZ_Dw v at4:!rD#{-" "P,vZ {Ek̗mx'+:>+b⡭LPp{%r)q " " C`ZڎU+~mf%;r}?~ljc#RuP-E7W"X1%W!,o{,|@ ̵pW6+RV\s809~AϜlE@D@j RA9x5b!,G( {@²LD@Duu+4sׅJ~ҞN?|(PT*@# TP!8ߤ^<%{z?e= 
" " E,նLjތ^//¯<6v/FëO%Z.(K A7ߢ$!GAD@D=,ǿv6E ǦW~|ݛ>07j>(" "Z.(I8\e:3xF]  " "BsިfqF9XmOM>3̛hcETO>E@D@DdMKjA>70r e@s p()_X? 0c/i*ڦn )p#s\ghP;2NThf:6Xz%+&ٞ{h2n~8+@{h+ {)qI|T kaD@D@'0iZ%*9.ZtUo92甆Vh+AV`Av \z+@Xy " " Ow^#Qbrl? u-)" " [dKٞv\JaYJ '" M?M-<ʿޅ9}]ݨ6_?`8>Ҿ?nE@D@:@ JVh_oSXb(,gL$6o}?} VsӓVtMזO Llˉ 8MD@D@D)NPՇ8p{, )ɞJ.ÞKyRK: bzu-SIL(/EfentLJŪΈ" " "ЖGuiέ<a(a٬ϯB\f?=~@4 *)$MF 21> O>0m>8?*SD@D8m)(܃ }pxýb.mD@D@DyVP̟YZ}K-ܓ'}ؙZٮRB)Z)g1qI,FE&Dd=^sk[_su֮@wh.O٩kpo:MM`C-}tykx4ydc%#O{'c{}Aϧwh2Wk|HWrJTǒ= " " ഁNձ\gŒ:R,2P0}oqJGNZ ? " " "*mqP[Ʌ{ D74LmuZ2X7\d̮9!dn+y"@7{|" #Щ'IޕiXQF➼e:\Jƞ~Ϝ"" Aa~oS,BXIXdV~d=flX80RE&8;_Oa]r 8f! S(^qQN Fx1^AD@ڙ@G Jl;> u X qr蹻lz!7v/JKN g]m#'ga~,c#‘Mq%Fꡉt'<]Q|a!|jVi!(|`|/Rp:OI"  БW襼3r:WNy~& -bOk4!"t8` &#]0ן lwY }\]#Ž4Dil)8Gu۰+O@G J6R`k/gvb+9TAD@@7̟yL0c'$vvST Kv6PL'f…!U?Q?Ru'GsZ 9%&ڊ4@f5/^/g!˛@;X³'廌਒ZzLH❽xddLGr8]qr) M&Ev}jHłD@D(CI '1pY W&HBmd ]a ʞ& @ ܳaq!ᅇC w>?BL^uBCvd8<`GunQz@G Bf?FdO'o@nsJO@`Oj~nȔ."feka JΏXG@Jz*,o:Irhfz8-OnB-8Qߠś'{=hA6.UB{d+SeAkVr" N?-;BDZ'{$5i{8IitX E3o ifٵuPD~DCXGAD@My8=sS !,b9Syb{oWFD@ڒ(*)@}p8DW4PY&iK(1~|$BbXGIwAD~\UAD@ZI%Q-J ϯO_Y24{xe6=Ql#,+s'3'yz\'IJM8:zN~i*@;JAyM&\=,P_yNEo'9R ip+ǸrkR<T@x;װOL3"_!Us'R%@^N yV%砭 #Ff+괨2"uy+OW׮%r*[/%]{Qa6eg48[ o:4 U>D@:@^̧z$'p " "Hݼ+9㫦GdO/谾kYaSvx /!"_ 19~E)" "uVPLlޚm/W0uj?=FFD89ͯYJ,?&0L_|>X T>qs9|c\^}|$  " "ZP| <z_DO9l~%%Ϣ " {% C$z&2L<=P|DHa"'F}xnjA&RM%쫰/;qJαNe%'Ӊ]A/.ߔJ_/SyX_lJ-X 5D/aZ+5ىRqyR6sK # |.7P9*"(m.c!ΝLӲķ4a}^\\ |ӽc{O;M @w A9^IdBcpӥjpv|dGFԲ+ryWmPxW99xŅ镚qh2iD#(EYE@D 䩞D#?/y<[Z!,_Y0p6'XkqD?JHJIзc> RxrU sꙒU )AD@j#ГWS5Ϭ@W9Q)" $z-6`KuyT={|mk^pHb ŋ@zRP4M)%aA$K<<@wK= O:kivq(q]&OFLT4֧gZH|h8 ķn " IAIKKvbpQys,w*&."LK?GT5yih+U7/LtdhٗyŎ?m:6~\Gz'VD@GgRDSh Y\0_[S/*"P |[1Eךez%${ K k3\S_<<9{p," " U (z)iF oEaK>EAD{ ,%Խmܮe5 CII`{PϠiո/M>m5~ FnsBk 0$YTcD@dԆF=6|Bm07Jn>?x>J" "  $WXg\ 4#wW<_*A\E@wl,gwVpa.g|<-LPք?COυmM'ɏD@D@F%ۧ\|z+ocdeAVH[ls%]HJ:7{Qᄡ:oG ?~d}"bfQLj4sya_9˯;{R@ ,] o>زR'k~&{,}"D@DɁA\l.%ʋx-rS߲_DPΟL4K8ǃJ-]76sWmMn7oո)^D@M@WHK1"x5ܕgi;q¤€1Z4>+L^zd8Ct " "P< c^6ݥ`ϡrnڴ@ymQ\9ĕ?h+`ȫe?gK/-lH~PW ـE" 
"݄y+iMT"Qy`מuVDpfzXGҫIȈK=jmxDH~?>`()VD@& A a=Fd(m5a+&yd " A`qM'y&8b7Y}uPQPn!sreÉ??o2j)"  A 1{QYa3=N9ggXg)x w[Qe@iR88I>vEAD@Du$(?aqR E#U!v<=S8˭+)pWZPqۀZN0 zרncmE@DUM\da:tp"D@D@FClvٱ7ݖkjBw v{@w>"X|.F04v+Ōf|7?}z# 1p|za{:zS>ktbUzqxշfUC%J@rWDkAj ]w=' ,u߅'WMj>QH9Cadqv>|ƒe'oy 3WWEGdwiKp탯=?ԓVjo'O ׾{0D@D@# AY/Y:yi:ʴGZ}CATUH('\]ђ< Hn{`j1}yO?rX5Ӗ/b?\ x{%_ h*N8@=-J7O}X jD׽2|*ˡ類 ޖvEnOϢ??s`^">f?Puk>&052F" "& r3Ye|)3LVj΀Z<V#p #' gJ?<~G,(y}7WwVByjdO$d;H{%$zuGxǾf?r#w Y~嫾%&/2!c%@ ʠbBM r~ݰNR~m!ÏV^@TC/ I~I75Ĥmx)~9ˏů~9ojv#;=Dq" "s$(xϮe#6;=fsϟj^,vidu\M48>wVyO?0^t_9G/+ 6"k?Q)ƞHocUh2L;ɏCX_YE/z^zMxk^(ID@O<~_z3қ]ޙFo\荪(9΄ũpge-hgѶDʫOX<Ρ'rrH&"i2-@[+r5C?ڔZW+# 닋c0 qY5nb͵7|W8|våt.$ AJAIaл=jVzV&J"(̈́fWw rx9̍\ ˛DR7rH z"+IB>q&ĴԺ/mMhB&TcZ3TUۥ$^EPV"c뭭2 o$"Ǿ|a+ַG~ךOv" ")$(yVqcDҳOgr{7]P5Br]x^#6rD߽+E>y}v.`r(8YlC-̹ݳs7J854Ǖ"74d$hD"Smc(-qTAh|IE&M|"L}a谶 W~-^;\[&Y@lI^ۛ. p+L7G1/ϝ~ TE`[y6Qz,ʯάLxhb0 |f6LѶt0;SτpX>{AzQгao7u/~~uC@lglWEWcFN77]`5g hN=h: AQ-gQϋZ(CH>=>}sBrxeT+31 G1hњ\}dME!$/M41ka&bq4`=3g>ΟP k}zuC[I}c¨.I-taM=IUҜOԙQ8N Y_ixLKKS"{,?tR3}⒋#U  ߁^ ZćD@:e//K9 "PaV0;J͟0N_Nz53?8{i>lzGáC+Y9)Wd~}J*ϘLӇ&Oԁi+d c)x\+=v,:ԬX7 +o05YG2~xE\[_~G_^oh# <|V $ڝGIQyd9jUr%W؝W~hA1kK v~$jkEw\LVxgnIp(9&.q#c d\M-6_y /}(x"-w,>S)e-T~6-.; ͷL26wuY&7h' >͕J`7c ZNOI͟ #fQa|pse=,๞;u.x!TVl=-4R^ eE乎{w&ꢪ̗*7SO(Pzz՟[r_XC?0/FɊ1_ņ{[*-TyK D_U,R),0" z+b(@X9Vz? 
fdzR[K\\gT `dm!$w MmlχdWd˾57$H]3ۜ[<1m -W-3ʃLie?w>Fv[Cv':80VL" "`@C0؝|#(!?QAPrdB NtdhCI3:L1Ya;-`{{&ВL[bVxq`EbX-'۱ky})AD@$ AY2A%]H`7pY*@;w]g.mszg8nA\C(6):;9l~Qg HD|wg[z!D{ ː=9xJ|&[@Y^ `30FFNp/cG)-+VJXv0埇|Л4eAWpcDD`?8n,x mH),DǷaڷJKf6ϓܩCJ!}UDXn}8K-KG$m*(]#ie[n߄]y]b l!?b0zt|S`>,4z y2~_Mt(" ) zx}{2n4cgqP榣pm@`mPTl)][1 ;Un+@Ga];C M;*&E.]E1 wC 1$,w+e~{C1Ңcyc>&ŊS?\R I63y$l4jo_§?Ւ/"  ' ApPMyctZF z+oAXGk{#%ß<5~0PE<'"M ˗B陧կ-ݺabbhb{(\hKJęW9߄-E'=m}FM}Оioh7|ܚ} =zO!#Oy<88QyI6GXD@ #W°411ihn܀!F p1}Uu~n5b^HR4޼7am6^Sx 33U  Л~< >\զ]L%1Dd"q_^spL i/<3LKo1x0 >[~f1і-6I+=5fuB6<^e/3_z,?$ ezK[}en3zKx_ w̆ÇnIS4i4m_Wy5S 32w5 "qU?m|W.^kׯu=om"FF0WfvA&P3DP70 se)̞.2FѕOFх= bʋm0Gvh̲@IDAT,OJئ96onkHX~z8J>mS+=}uu>ncĿ~&Ǿ7B(eql7xb*W0MA"یNkU#mgPԴ1K+τǾt˗.o{F_aIyqӨ&2) $ErQ1=< "M*0NH|G\HM61.G V7SCѱlVfd~%MJO}ͫ?BPfpk>20Ylp?}axPX\nq{*}(eah:>KU_A$`s6NATƛ"˔"8߿ዟBoI_s'?pկiY=v,Lڳ,IYb\|A%0 oiy -Ŏa5fX/ڦ)=Ճ)/w8KLՉ-JJzթ>~}aҹ;!ӆIهFCjea虔툀E@()ck}x`lc(hE0>w'pw__oc4KX96GHK{LUyAdG&\tz݆L $*b-8CbS)RH,3멤_ !mst|N yV}Kx%K)嵊kP`{]lWL (Rs(4WPSUFg8^b2wpo %,6!-j@ꘉ- 0ɂbF]{[o[ƚ!ʶϏ}#S۸;yTrVĤe bu`JTWe~7~< M>>~+bv#oi]i#" ,?i[{E@#qq +r]] &O>6O~`OGx k)gihh" e`CK7[o&LPAEmgRZuwǶmqkhϲnkZ*6㔚 3喙XO˴TsnV>Ik1tp|u7 \! !P5o^*25T*T L,v\t9$IBILg\@4Ow;?V/="bY9fTYMι,= _J'Ȅ0dPVAyeB@q$(c[ R5DfY˛}L`I\ _ )0lM Eñ ԓĄ%wo3a뵤zG.Lo (2ۚ^ MC3rVY~dv~cmXsVU{zLfͲY^xGZ ,*Gf` =ݡ??ƴFkD[`/ʇ |E@D$(I_12^'%Sh>&URXHhkC̗dh |[?YMn-%OCxOi` e zJ5?nq^N $5:S04YyFk8g=0s1?Kf(#ONt0D,*`&{:H )?\W2~#^J#aqT ;3 -zgv%0ͩJD@D@.|/^cF.KC[ͽS_Dv81Bʏ Gz;GFV4&?}%M.LVD@ ! 
AYڜШLe%G/%{+CXd(kWŝx#;0~;2q঩WF~xJ !%F&4Ǟnjo94|pKSfSegmi9LO|h:oC򚄖؋F~FKyk>}Kv_-·bboq:6LN i/6L[0oCfgm/0Lc *s:{O?aϏϗo=?|]P,̴Y%#"6~tuSۯq<^;1=\#ޚ܆|fu%L1#=Iؾv}Z7~OljSEO7yQRq@1S'>lJyn1ln8yf01gbN 'MMrKJ lXX xvƕTHц3SѣfMY~sW8wdσ"\cWcssS o63dm7CT[:I12cl}N6-7==h#" ,k^Kq }Ye(m糸<^* h8OnݼLLl8~W Lba'K0(<)JJÇ,s am~nDs:zlL[D2^ч|L_#fm3;_2.7}KXf3$ˍiIk>l)-> wߋix x#1Ɓź`Q|о@$( ZK^(*D ,FDUzbɕ]<6'ڼkGxABxU=vLՏn&0-`C`Wϝb=J׮&oۢ Iȸ#IȨVBK-j!HJB{ #"+JE1c^fm]>osC,x(Lݫ#YC;rm}?~f̥ey,ecNLRJ`0%Ցyڴ쬆@q$(c[:5璡4 =a9`J^ZOm]{/oЃ{a2-Rᵎ}nט?0Ӌdas%*Cllqyv_KX'\G&QJ)#+j3p_gnAҼ=ɕocg"iihM\挶˟ġe\TVzh@֑pۘ+Z\K֋vĨ=+cr= s0B/%6瑆aeE@MD0$~xρEIqws2ojw:M*XŴ@/N~ o ~킳QJxQZ.)z“Iِ&я#ÇSX zg ˮU 7$SM37AL=R9#3qG[?~сe6aЖn|{|6y./akE\XF6OA ?tusL ++TmE@D`$(ϰaNnZaH 0`VtSX@1r&zh<ʹ+M#ϞRy1JK.0P _WRTF͸S'(*z>n;bmX8S0{"L]͆4eU "Ub:()|guCDVvbj%g٭iPyfݜ jp<#Gjϋ$$Yȹ"JwHPǶnp]v "dS_),N5Pq ^|ѯ628ײTǼ˕M(6`ɰ7hfH ؍`؁-wXypmʷodXa˖b/Rr'e7փ}OOv,?q/\^|à"Em[X4TO9qLrhc˄̝cuB$;bhӭ!ع]xn:Ov!jȫqқ@q$(c''\=qSX]%ʋc}"GYԹ@rRiG&||i!詜 c9n\z|<&hX(g^gV؉uGWf`m='-*⼭ {̔+X_Ocܖ{xbcq񲢝yV!N=ўExt1s.X֎hs1ax$؀Z;" "Xooca"^`LD ,{+cdU\1FuiCO[8 v4} 2`ڀ{&=Lhp{܆CXuh+[x{XJ 텷$A{c ?H汖1$/ID1t#${7?% i.}O̙iyb0JVeJ3o.?m1[ >N@<{3V~*'h#" ,~]\"T-m ==fgqEIR"@40U`G)LId YLM;AΟ~G7@B@|F"e̴lLn;uL[}F*!mS9,qfrrdf~J6ELa=6x+}  #rs(cRo" "P"6 ̛ EɅ"z)#0 vz-OZԜ 7T-ՅЇa5M1PToxVСww˪ (Qd/ޣG,MUDZO1&PYylc2T('cM30^/ofysÂR:i1,~uc[HLF' " "P  .1RAv)<9/qRװ UR`D=I W&p<88Qy=nf"m⇇#zyx ޸Ϛ#+!G2_K>/vv1>?ܔf-^{ M22X?dF|1?6CE#Vt^YYHD׶ஂB@qz + q*/"B|bU =ƨQ5X"mOAŕ>WW+" b&KVC,O=ap֦n#g !Y6Q* K W|THB=|! 
}sDžd& c>-*H`ۛW:G`+BhuOF)q؍n }.?沇 dٽet$eA " ,~]BQ F`7A!*`(,Z*/^r6YHO>VԮ7ד{F\GvΙRIb " "P 6Q {l Kyi?3}6VnگZ# ^zѶ?++ Q<愆W>EdDOq4gٙp7k2*[1 J7]ԎkV/N$<m8s*pb)/඿o06`mi) }%,.ZD0"ic -(0`$B͇Z>DZqRh~^kl̟bEm~̃#U\]DHEmo^k.ehM3a^^{\E^y8qv;Uc_12TRPeĶ5i ̩de=a5zןO~5ы 5'vxWRDUGmei;qj']M逎8XYm?gq̖6gwb/'!iroLELcPsл@q$(cPχ0rĮ4 u+g"Жxst *$e5R+I-uu wn܉{:2kNJ%^LܤDL` 0Deea=CS6G^{_R4!ef W<RǓb)O>M1+e큞 *1Շ†6R⛥E }Z̄W*">h<sx97Y.:mҋ֮#1?VJP:G@8 ^ʆ#6'1%Qh';2}*x?{LӁHB/m}}al(e Ý~|2i}kǼ)?PU$K|1yo !=I:1<[ʛ9}[X-my"vL`iiӱe*#r{:#0 Xң90> CmD@D$(8K9ƫ$6̓y/OӑͷG`N%R0KGrR+[&0EG>n ⌗-tUdˤRXy6٥$ӄL)L c ElX1#Y鍁ϝy~A Do]y=O{So}Z1L\|"/eވG0V3ݪJ4%a*@$( [Z!0 +,{0GéOm kk:o #I|xTB<<@Qyo|?=f Pgp.R$mcrGbL`M&3!툀C@ʤz=^BygXMy̯Rh^Hzۑry)זM_PLߌY|;]T?fQPtYH rMdY*bqܦL~l3/R}LcNj[/-=tr`2-s,? Q^wJtFE :3.ۏ~9o^ OVX7gi)Y[y!D" ,ma$S*aRADB#F@\3+DrGzDs9!(6(**M$Q49PasyߣxOm¨jAy~T*//?DQE@X &1(X0dvhK.Tq.b|RLUĬ7u)1b ige<ҊrQ?HG;8"s[ʩrsNGMe2).J`Ca3A2QVDH_|O1ͥ3?edo%{-{q, =t_ (FSɳ$ L# [I޹ٺXFD C:m.Z.e3D_oہ1X%F[ τ DڹC~dm wa)|-vLiB 4l#* -\%qL4Bz(ez)Q'[6ΫJγTH@]7ѽ?5cLP*2c/1"iy&)rr˅ Te".-1^SQUa2~.r[*}Y:yTc:X0 acܖq]zǎS_юpm%QfȟVw1>1i Ai&"P0 ʂ7=/!*D@j#`,WՊfqkkj`yC_QH;KK)4G iNy> aVVoeB,kG;:0'S&$aQ#/2П(w˽^ HPL@`r gWfrD |v%a閹/  kk% \5ABE (rbZ_rZBivUVΘ">"r!XtKMLY^` PzSLr}-- >,.^C=Jh! 3|OQ,9rYl6m~˶J.$o9p=ʩ " ,osЊҺWBXɆvMjnnG>cK6ŝ*' $T, oM(1 5#_1`B,:cbӰqWn)?++Y gik,"uTWZhgN좭{O[]Z[oax\=W=6" ,os<"ܒUt>nbRO.LU?}i^wHaE+)ΔS/vm9PzVQE.BQ)ΆkXNHC.10ғ:WX7f=#,;f4D!gi%W5dI:#:i+% y?X6# yM(@mWb $^”$a[IqI+'#OJ,.L QQEe=;єNrny~SL qkfKfj b`I2ϼFY-[bcܷs3#c qiNU0ƹh1;Ie &ds^V'Ka#,gt)Vfix]٥4JO~s]4{lE*9Hu4OV>F$D!K"l5I((~ː@!prZ3>jk9=J f<7f;_t zH{+Z 0}EfacxxȪf9HykDUoD$+![l<:{YF/Wӽ+(R,4h\=}^)%:TomYQ~V)Kg3K"$[> 4tUʭ5}6wjfGDŽ}H?n % Ieg; \d_\_DVI!,oQG  dlaޤ5&^p&)q+'lkȪMuۧ-GnH%1cnd?b~{?<RcV@"l I(: ,@"@ V*ʫ_lnvO汕i$TALDH`H I =WtV3wW(LEOD"ɕDh1PC@VcZcNEacrVrg&05 YlK IXMrr|j:mPɏCƥ$RyW)c4_ >rD H d[;>R,@"Lq\ L!ifp@V+y*,HruAhO$CIP kw{{BD5IX o{C,܍n愇&nf]U?;ncuUsaM9s/xJ$'yP=d46C7d1EφS~,ț=GM,.ܱ걜sp|~Çh87@"l5I(.y. 
<<3,^5&fq::Pܺ1{{~79WRD Y/|Z{A(dKf1WF-كc)K5Y%0 tĒ"~0 i9ml6%]6ea{-]!Gw؆ pNr$oPd$[@-sHRn ;>^!ܴrmn̳[r:z۱rE(JX>"yZ0b]CI?FEϧ Ga 10sqk{`# Djo.dad6PTȀhKēma)RCRWR _b2{\&) JE`O޷ 8u60@" U@*iݶ8٥DAZwߧxjD [6DwHz[85X^G" tƘ̂d1'ʠ%؈dQh5&_CeScj59q>4Pu#}ʉL!W|Xp> ?#dV[pϚL=p`_[D H $[mw:>dIED+X^mNns؞QvVK+1EZFDjaÈ9\C)ŭ1' Rg/R%%f"TF8lZf12j1iidgEU |`r昣nK]ƕ:QϞ=%1lRFX4]n"l)+lit*|vÞ%GA*䵖Y͕;_t{JDIFh8#C))7A_a˜u"}(ڏ\Qܙmy9'I? ~XR1sf2Ƭe %A*]RN[Zn2A)3*g݃9uMڰ j>lD('&a2P zK|:nk}h&PR$[@-(˒$]-H%5bk: 8e{o+xڍx3D1 +:JJ(W5"rXN>4+[ ȧ>p!1_7;ʭVMo4V&5/Ք\~YRwy]ō^K3or'v _!aMЍj`Er $5楑:hE:^ b 8h[ cqI9nacOYC6r#GIy"l>I(7Ӟx ^#K"t'\8;?'byn 4On{ͧūd"NIcdt*;ʌ/;Zc-]m rǠSb!9Q6]e~ͥŏ9Nyi¤T媣GIy"l>I(7Ӟ[ʷ@Lt`iO5@c(TAxea Nlk;h W 7wc5b!)t@IDAT2RͫTRs 4j9yWc @6}CNshGi|^9}Bk.ʥPIY"l& {w|\3D`0o,Yh׆ |fځ >x|U&]&4d9ѻ"p 3𐻋0GPt_9FL,]8KFp놜-'m5YcxH7з `>WWy&,Q[VۼVX"l6I(77|HLw!8 a<sens1&o{/cv욽|ĜRȗ;5OYm͝18bżlk( u6X K4c5&OJkxS^ׁ\$zHB^V)G0TB+<®4}#{jݘ ?nLkssjqJ $Em|y<6Qu$"eKLn(KJ,4b˫A2agHsm?1H]hM[IёE /*`za#kțCe~f"1ۭ+EQD LPn&=lsG^=f=< *;AGms—xsuDi^{j~zI?q@NkݟS츲$c#iďuZ,N;\+5h,jV \(+&H]P)1J%عߐdžּ' Y$HBA!;o/31`gbmodhޤiR^Q¢tMJD PnHe_<޽)\[lgMn{бǷ>GhVό1 {[j|\$_i rf+zHYdcrЋ'u*G TNݘ4zRʉ:u?4'7Prri" d낭?br/V*$@o#pm~Ί:4WH;BJumxv"ldǰôJD`$\'pj6OI*u4V*i#9@;%ZZ}oؑ:)΢LDzD"B2lDǪV(" $n9nۘns(#dtn~BdkR摫`QX+7l"Mfm:i+) kgID`HBmPNcxH|`N֙i" 8rip7ع[)ؚ{`o;dӋF( ZLP(, @ۖ=E$^lpzޥH"lhk@/KlُD[p\>8l9CWĢ؈gea?7n.WxK}pi$HBt ;{s^Ɏ` LBoCobd@,Fv+,F}_JD򂽑E(4#F(ҖhF$5(uT, v+RnDk϶F_9֊k]*Cef]q]vi$FHB(V);86W>3Bb9cFULWK e ru䖍9>f޲5tءP172 :)մJZF 0 UVr X$\2}8Ubd*<2駩׽^\Ji$[@r@|`$YD!dKƑi,eo>u>[Fx^۵vh!wWֹPjV;qDL@p 5;AsN>IL,bR =ѻlRkA-'sΜI=s QsӸ3{#h'h2w|[l@"lI(7>q>aN+LxZ{\S 46,lkԥi ѻ~ 9ul-(Ag;V@p*j*';C,/nt6̜tL ٱ0<\2t<`IlhCn/ORc宻f4HD`3HB(a|PɷHޜ#06 <^ObD#g12t#JDn2S^UdGFHHLҐ2#}M#x 7F1HM8"8l&n<(~4k,7Oz ̓#Ȩ†ǟ \T.=+:vp R3HMF &o<|HD v->mM<;;[ȑ,<>9[ÓB^d}-X[6,bd3ŹH ?#SxklH`oGyZ1׌ܑ>֡^ˏ7FG+Ӕ/zv\;wNzk|[ٽ{|-&$"rSOgGFsb9D`@؍W*p˛\ld㗧˿u$5Cel bf4ʘX+C۪: ?\k}]!IMޱbX+9E^6;|]/TLOֵPOCk_7~Q׿F"$;G;U*uYD/?[R$yӋ8)V9\f12=GT.x達 z+4f)*ZeTJuK%9SI93u D`X@WcrA9 4\ZF6WsU͍Zm1D 1PDRlؽ/fq|g@,g |}7;^p5J(ɸ*3"~;6d͕m\VSs!+)un!~A8A 
4!HJmle!i4T7FYW ݥBX_ o?eϞ ND`GHB#^PPr2K"Kܘg]NţFf"+Y]7ŔF { Z wvբpsѐzʱd:F-s@a5Z6iF*떿CiؼM7kɗM^JD`HBZ\=&#{hA,ʵǞ.\tGeQI{} E HH!R0j LBrĆlHa1@6F&, (N5 L&6jҖO٥J{7@"l9ض2Pw! Ƈq,@"Gk{79bٯ6[~hI(CWد`dXAB.q/R6)PIM#jlb Ҟ1f1= ݓ2w!)t$CA.~Ie+O=]fNfկ~YzRC)KD`HBcfc##Zf։@#w rXAz?&8{_vO>DD U+hr p1"~iZfmI.gexPAe@kxL \ CVO>h -[罨#ex/4D PnG@*$@#-q֙2M{iwt]#^‡+"K+.#fI""~.6ZTYUItɶW җ7chY ;tx pL!T3NzС*uX,kE%BϽ/7K>r$@7!Fr^ӴG4D`YljX,N}r:ܽ$2vϰϭ@(EZ@cU/tH4e֍F !trvnG\sG&>cȡ*AE!hCb)4]|[/.K?29x D iP<2D`؇/w~Լ vn)}b޼Jw@ȭTS* C`, obX[Y ގ?wJxYO9FR΅jxr5s\\[T6=ɱi3ǩFŠ εHQyD#&jEC%H9?/|r=- ox $@"S$}NppIu 3D`0x O=*'!ׁ]_]-'M`r\- Xjuї}^~+WVgZ\<+#+rAejhYq2&5d}PN͎dֈ j'yI &uf3"ZM{idK_U5iD HzzG4FR32K"6\5{~ƪ9 ; Dvܞ6_H`|<ÁkT@#F+0;SaXV޿-c/wU}4U91H|LUЕ >=Ȟ4sF|xhl~zVD,HLID4h~ĮHrrOƵ1culR]Ž C$R-t9֑kuHL7I]svs4K"$@߇]7Ju$JփNݠg\(wrՖs OL>"\n|^`7ޅUʓwc{wzzū"fJҶ{%{$f*>^Ҿq;A쫺ABڹ)a}tsavv1doR(AyC"L^F`~<}[؋l[CgD DxB.w_3 q#+j WOm$'~OYC@j`! 4wL&* ]&?}o {. M (1T p1j7V}D#tSvž;XĈ,(LYh&u5h>~lmE͎rp =,@"$쇽؅s8w|½)%kG` S_:gp,^3qV{)O^Í}p7&h @nhrT&[셺Gs)Fᖭj7~e=qK^$ L$˅u̩탆 OW"Q> * u rec`:U$5eNQ#*XiD.=.;"W'gD P˞yC]ee:@ pmﴮa^\4O^+qoƒcy;Z$A"MN. $6G#* ]9Cq˽ej%_ח?e ! (^T ͝D]ߪƞkW Z{M6Rde 5O&ȯ'fUC.->sPïEf7Hz;S?|u^1M+l/4Xk~7اϗ?~zZE^D&U2RK"b$u`(s]О=eW|u?^v5s 8Mh9?tDOj4︐~$hWI=LB:{Ón#SxG,e$wo¯韢P/:#c*l:YA_?[̵CCϠ̒$@ _d(V5sKwN8`bdhn7U$򿟞)L%=A  VibԨɀ90h2RE$a_U5q&gpSGL|72%_Z&ܤKޡ?1+m9 d8pnazj5oEsBWk*6Kۮ(l=y'ۉ@"߇]=~TfI@gtɯ|L8{27v[m _ aGz6^ZĢՅHޡ֋ݢϻĩq bBL普*ddZύI 8;`32FrM-I,䀃fO>b9GTTqbR]`$:6Cgrbt5 {e E5m۟S&zW>Sf|'yQ֜7N܈/k'a1‡a+҅=8j-F5ӗ9SoiDtx[bJ{Y/'DHBG;ۧ2CXω|3D 5PyeS,WzV;P ?&8V7/M꩸8_bFcCz. 
caD5'Z渒2x=s]4K"$'a*gӵ67K"W;Eѧ/͕OA0J=G`/!UJ6#)2nr%m͌ jYI+-F8-N Uܘ=x_OU;T;-oѷ2w@ o82 |QՇDH1'cdܲA{̏ LhUc6f=֣E9@^㻜EʗVnDHBW7&sgWDo$L@YR^%ڦr7yi犭{ 37ɍ٪fKFRSgGP/Sh)6/i$a/MȨF1 ڔ$n7MິuK,u\^0 :|TY~1~hfm>CA Z8 B'}_{20o¨54ݽF  þs< G][V̒$@ oveL\Q@D  x i]olx<9&o:ש2c`nfU^}W%ey_#}¹#z26 ecٸ"vđ9}iфrh+91w w޽[$@"o$=#Lj4!0n>gufk%t0Cy;`ׁh*]D h(閤l8jHj'jTGJ$ss QsQ_>2T^r㑇䣏 `[,X3e9ص &S݈l7}]ЉFiicױ#F_D匶{cU }-_ub̽g$@!gOQ|}=`z*L6HCzvZ.gøWEM;XֳSo6T^yА<X4 4P̧<5 td,#>85#z3EE-EŽuufub^hNlC+|:U.\ە6{n?Pv?f5.sU=3U>x+ݸ}/cfu?Em MۛNmD\i'X֭H) *˸ÏMVIwMiM"$}@>ڙ6I,@"_*$&Sc3x@o$lk;ׁ?`MЩ~4)I%3K#" 㫓Cm)%7M!Nwhw2<Jб%EAʼnm5X/\,=Vb%sQО|x G0/?>Vwr 43S} Q;8"۶b-|~&gN7Z by͊6G;][d3H $}C{m:<핧fIA`8e܍OxDCVH+xpo\; % Hy6]C$|Ȱ倢I@ئr~@ *П$N:Щ<Ν>Vߍj|jĘ# Ҹ_U[miLluof<|eD He漺 ~C}dy[$zM&|i\okr#GF`*yC}?wdN|B7H8ti>=:_FЊȈ0GCŖQ:n+>F$kȈ4(s_t@¼8.(4GjM|7<cĠ[ŇF|S6g39C/ꃫ7UKozD9kѡ *4gʆܢ&֡l1?΅.ixBX[èdQ;BF3dA+[@"Ч,ܧiu5GQw2D X}ދq),Q8w YX{Xq,x `aݩDuΌWL2ڒ9"b a]҆b4Rg1Zrt%"5AMnTBL:AՆfc9L) 1ď={Fm쬾pv}wNv W̑q5rH'PVJ˨'#id'RV'mC [ u"/m̌w;\Lj>D"pӳx>e/ōxګ_]v` }";"Km80Xz7bA7S ߸sk<+qR!QLUT錏Qv7-?TC LN)25_ ;?#9@iÛsd(){n E˱E!ُd u{~y,"Qְ']k|o3ƮKa`Y|ތ'FnD S6?z^N7ۋxFKq0he;; 4_踸jѼt(~%#a%e6̀e$*? #t1Yd7io^۔*u;D|KTTܔR,55 4Mm4mhY!|ݺоyg[Ȯ2j(x ̇سpD_57(7)3yؘrpM"=? n{ ʒ$~ܠg4??;S!|F<'yޛ+;7a N?{-C]D&'5f)i?!fEQ;HW`(UQG @//9GoZ7U9C=n݅rh>->5~M J*'p[=\ge?Sl>B$x5.|B}'`Hb4Mmצf%HnD >ƽ29Ad>r;ur$Wxl6ц覶is(BfbK͸Ymi#u[&09qo c(f×˓rkL2#,U`P, 7tnu*&iԣH, EKUђTXM-QH+>e>Wz5Mwl+>,^yVL?l7[#&n4g|H>pNFهjxy 3OMgzI&d/)Gg̍C60e}_xd+YD 9{h͕ s=q:{|\&BEO; 6@V;C9)7&$k\1N{X(c ex$>a֋#HgAV/߀fglj5Qo֑hG|ච|Pt9>x])\O4_`0`j4 J\/%X)YD 4P|HI qEHg?Ӌ8!Lqӳqx=- A*1qGqV€8Ib[#EM?29<@iA9URJNyLd6mm\Ϳ su!Z=._\~#(zkq]+tϩK}g#ei$x1\:#F:Ʀ=G aӬıЋ'$le? 
vӱ[oLjv[QZOp=qհ|řQ̋!\| SJ#yڍM"I(w 2W8xϒt=}9S9)e g"|7yl',*iUNJ 颞 Y[Urz 6q M;S,Ƣ}E6Mi[nJ6Ƅ"vdgji"eFsO3w[j|#3 sw_NCsyyWȑD H8m1"<ɒt|hђȭ&;3[fXT) K16 rFҌ@-GH!a(Glu%k2,RȂj} EGX\G][O"Ȥ~H6BLoUbA+Γ8<V$@"I(hgTvhpMqFq|XU{}Fۛǟ)pڭ(L!#U5S4^kA '>NT2yCbc\Eį̱ok 73 k JKh<80&c2R#@IDAT13v1ƂSj:]dL`< &|#>%Ao@JT$@"$gs*PD[!qܨgP[]nMb2є-]& +!EIF\&`Z] ~;m<B* Ċj4^;GnFªLoWy9,੨rƫcB5Gl$@"O~.˹\Q҉xg.&?0IU:yqb|  U]3M'6ah MDbgQЦ?V,3ŗ>ʼN~<; mh-?ЯUA)3ۖ̓$pe>[}E-5l\Q0L*RִTmZu`N $@"QO~q\Ko~۫7xyϗP[Cnq1A%ġX} ژ]aĚn܁Bݱ:1ѯy>>ŪٹҜZ H>zt] =nnD0VHzIJȨM6)$^8TţHVtÛbQ qխM*`MiZE1f:6UL B|T/6`c=޼K?Q>=^SgˍSe||o)w>9$K"$6"0}6lc laju7co|TnO;g/B{ 6FlJN +NvC3NY[K&eO9ij+cNJc*bbݔk3Ɨ|O|<ϖ|255ᖭw/?(oxkɁD HG c+IgIzYd?;7WlmRޔݘ/y[ 2#$1烬CіT "oEBe"I&qW>񃐉R_WďsEDԠEEMɜM+"2+VѷwAQ$ӑrC&Jٶҋ 7Y P\e,e9u1﫫-3HD H #C`?Oۋ[_̒t;8=;× Wys\BnD$B~0%DfxoDA4vo&vj1An̎#)}c(Ȓ,dMsZPYXis⛴3M|Ҏob(&1`Ҏ,=U܌O||Aȹordw|D H-E ›ηFa8X;(<>gUKEG|჉ āW*$3V#6 6*=Y[ĉkT|.x|=l<#kP'=&+7*yiZXЫ6h_aî堙 C!J()Ԁ՞X/oxu1z/7R{SV>O~~/ߤ~zfM_@"$K r PRaT#j8/Bpm~ȕ褑IVKd NBd6fBd2j;V-FU|QneӸi"0:2ιпn` U=^ Xzmgv|Hݤɵeص3P-61yǞ.x>p?L3J~wbȏM&HD`khol)\ i+(mfaO[m9(r Ly}}+O=u&FwO;I $@ޔgvrO>7ܗUc;>0nu_lH!!kզљ 7:JBdllc7Ka;Ҥ\ݬHGgq+]Wki30t 5飙SV ײ!csq*Q yP| fj.GPX_^~n7S">Bpgp=q}tP>;u@?Xe?R9\-A&+ 8m,΀| 2rk "oVm" }A/L,#E"PTL)3(NPѵqam:ttr(?rӟ֞vgS}[o~c$&@" eLqeqsxd\5iY^Do34UzM>pfI!&BUvcaK+q1h՗ݲqи&CCqb}PR.QRgCɪ+ÐcfΎX?ARÔ0@Hr*PmG6ɴT5Gڒ%X|yʕwo{e||Wֿ5=AƩfDd=dde,}ˤi Fm({gy<ǭ(kJV=O|ߦ*߲g*$@"lp|A򡳳8+ ж; ֿBgS%77ZO^@+Qȝ$XzGsBM|4<%ض, aɺQTvP#]W΁?EJTØ CV1Pr>ϟ[lWxʻ~鯉lD <;zZ=[oz^n,yx /ϕꙓ9":DzLl8a!cmCqoJڎ5ЮψYmmH芧W ioE[J.b5T6"7zJQj%#:ÈI_hs済-ORklCk+cce׮2 ؘw΍_j~8 lx#YOKXel;#3CeFڣD$.X j9Ԛh*9EM ~aZvKo%pS|'W(wS}UFםՒnOsϽ_wϖܬrϖ=92$@"00$]=x Y~B:N=77_Dvff왓Ei&K(/'oZh5j!p"hQ#nF5zA]|S20@u` H墔h愳RW;54,k뚦q@c}]q*L/*/^ޔ;\ɯc/$@"0(MyeO<=`xVmxN|5 $0*V|弦5bZܒkБ3:SjuzPFb1l>dik[͹bg߾sQ|( uJܞ~2v|*ȁ&~qQ YfiLJE_(&`3dyu///ayի_˳^(?Aa_ HAB mS s}(O.A͕3qUX;CA*C:r~[U^(D  cɗIINicD".7ֲ?4 0 o1/7\ʁ)-fTǀ)}<\aGW>  RmZ-{5-S8ݵ/(=o)FO.?3R_QWi$ r`v`NtI;9"p9 b+7p$" FŤmo'qQU)EkՌ k"Epf:=t!&R~Ő1s9$q#?Z /%u*(ٳ)ێN;>k|GF-y*{lZn‰Ard)0P%k:@EW=w3_U~\Ϲ)n|ӏwi KD 
H$}spzX+!I~irVf\؜qmu#By֕dC#Pmj,lFč!}yܨ1硪զ;[g {ƶ蕜@!HZD]ju{Vԧ_4 -8yԹH6 [*x)ṳ̑JaonR ׫:ٍ/|]?(o/ >U~~yݎ0H@ o3;9XܠɼAO˸,xߒ7! I40@*j}kڥʿZ959[I؉1 \ pD(t`<$?$5j;P$#-4CI) |Y dc38R18EMza݆8{>=puJl(m~CN O~Zbnruچ2گG~mƉ@";I(}*O9~CfD` +FW.Ԥ܉%xq ͨ9\q=F0#)IZVjN,V(0}@FeO,: rME6^<,;'U:ֱݴyŠ5 BÖOYЯX6> -Tsm3gΕ_(<.~w|Cy4@"$F1O։@#pwS&I+}hrF+iqce8:;@i`(ƖqAlCXҠ@GY=335+JP+'yƥW)a-f.A[AOԼeU,\qadj"uT:~?Ϊ7~Kd'N-?So.##)?'D QG'i.(˒$kG4V7ĕ*[Y鸙лctg,Ƣ1 %M|hx9"c*S}Yh;|pfxY[ lW*,PqZ[|45_iuaS|*uѦ2rZo} ͕'H O.wMaPCy?rQNd].}GeF@";{#*%o4%Hֆ<X oɘHz,1Ě^iqS|tP>.auS| Hv.Jo *%W|\g!1& Ow%sŧUx` 9=ESZɰb*# oӞ7 VmNr&򲗽ڶxy\g?gW0D !PT7l,@"6&p 04)6/-!))zxlۃ1,"8/>s.զ'GjNr4`U}(2$JЬӗ陵dNr0'zD #ȍ׈y1]nMu)/?;S~@211Lxw~Gyr$` r0sβ [I688b%#ŀD4ԄTTQa B"R:r@$@V@RPӒ&4=iWy !ȼqտ9hH5!|*)1E)}jӁ+H6|vi󠎈+ wZcx=Z}W_޵LN^-?*gϞ_m$@"ЯQuٜޛive§^N'MH ?CA\Pp?1Squ\(H6 ,H:N:ꮭ/޷zzN{9sSsOyX'4ԠXl.H*0=onŽin68,- ;D 6߉{pLžfR$/gQJʳ?k|:VI,-aG O?24c|8- zvWaf~˛ \ԙrT$B@)D(do= j!pRmp5 $ɊL]')蠑mBbK<33ω~~ /+ۆ"__uE{w_'&&e'e! jD@r5UiAlm+ 9#BrbD) \Ϋ6h* jk8Eu3aҠ!ME##IYŜGȩd)J'2Fvf-dcf-Wy0 02*70\ s7UH0ib8Îw:_e|ۿً"o pnQRB@t5jN&r*ݔ S!!VSNvITQd @QY71knӖ_RNp(AeD*vR^:WErd諤W 9m|QSrF3<2L3 (ݞ?d743279Rvۜɧn\[:Mxs^8||! ,"kkD<RE#Qjc7`&Y ',*eTLE[$|Q1EL HL1ҺBnc$&cŌ##rjjMn0h`fIN9q~6(VU6v|g-H/EXLY uCgEO`?KQ!\mgTY4[,:^P!pBb.n*c*  P6 4IvHH`8 U%O,pP(=8lׇ'WCkπ2ڲrB숓u1p-+4oǧbdrbO  xvw]k'_X~7|\?{Q>/E! jB@r5M8pЮNف @1"gĈ%܎FBޒ%F:ڲvl¯9'o6V? /N(pVrwצx'cEȱ2x[34M uIoaJ@ϴ+GF? ӨlhK~vJ Z+ #zK嬳6.hڿneARB@&D(W\N\ ]_O?.+VF[%NrZyX5eUm .aHȕK#8Vͯ!HVvtrJqdLh6cRnk#^1̜6} )G5ޙ=?2Q1}C/^e if^ۿyIʝw{R])! V"lj.9xrdEi9X\''3iH|ƍ&1ZYX' J FjI҂6\6ᗓ=4; m7Lq¬1n ?k.OG4qpf>dihL>Qej! V+"j^64lWBB`U#pɆ !aIR+q,U4I۶+ $j7Ձ;tXu}$\ɬA<3St\ #@X=iӷCʡd*M{i[;>ܷنl*+؁ǿ_?1z8 >_SFt95y2GƧuȦsha? sb|w0[m̌xF0||___Jܽz|ķ,40$fcaCg>q}>[$3@5 {rn摱ӥxUȻl@鴦ow_r]; WvzУGrӞ\-o(3O'Բ> po|]y;<eA5|A*B@Fw>\H,+Są2Z;A|5X)8p`nHU{#fQ3FQ%\.2ׇ# N[ouG =_Idg !{SŨ:,B8Yh_}s ZWr9v G).rGҗRnrc{Fy;\. ?J$Ca{7}\C ! 
zʞ?@{ $  .cjkϰTB8 Z-m-\-L2g(Wr,?s7938tm3;Ws^I/߭66ںl:'+oi('=r替\nr\vI Qnͯ/-P֭;};O%+VY~OW$B@:"~WЭ]W{]cSX"Ml  ,F*1duEX@0Oi[&Yͺ5kh|_\i`\<^;MsUjplϰҪG?15Q~OI͊oQBC=L`W?//pKerr8s;;K^w%72KH_P/Xyֳ:kL! @/# BgO/ uY`VD8TMٍK1L@2JU'sJ巧r=tf*U\ 7pT.#N^i\2ht#_ԵRΎzV̭;31'#Zl>R^uׅ:t/믿Hjn7^ww-[v%wOÿYnܸ*^=wI! V="kgrOE'-7WfT"d&L$qaInpC%qEYtfCg'Hb>H<TϹΉ~P|Mbd.ӗ[tiq=3>c8UX2i1f@1#t`3^}.]{S,ejjg3^7M6_^5/f%}߽;;|e ɫ#9@HD` dT! pߑѝcneDIArԔlP30.M?:$-=d]ZGA1K/*+?3χv$B`"3QẆozjaɅC4Cw,q+I,g[\,!3n]7C+3tYWώ L4ZvjW&l0};M6H߅f>J8t8$)<.0p[(t֊R ev?wz1%LĚ}K7^ӛom/xϿ]*B@^F@Ϟr_Vqw6Э p>Xqd$1&dDNEMc5YuSDjڏ[^YЖ&&Jom&Jl"cEqN1'K 鵕]"M>\ֲ7^&z.۷oz~܀M^U~=oͮj! @O" BٓMI)tB^qW*ylP^Ɏ$MR(eiS65H$bN:X{ =8wOu#, c㘘2̉`2rx(M; 3ftk'.Ƭ\5mlsu[nC; llZ2_Q91!K2O-q'yC|Z$B9/t_0lRĐ1hy4-WT[b?UO}r˗pW}kyKszd-B`q N8Ф*=xEtLPe\1Z+T3F#ݿ?F[ M7n./:nVqϞ}媫-5{8Lk_Q~o.ׯ;m_r ~ W,r\ȱ[_ 3sZ_EsB[=&G߇h/t8V'pLAOh;DPZL.e[4\NWnJ|Ŭ6͏#xp'.ۀ=sرO^kݻ޾}[Og_~>d( " j B>R󔧅{& 7]5fe;ɛLѶݞ^u w2G='qU]*S}Y I̮ąNVsD2imeУtva$͔׹D1'˃,Ϲ׋ o8ؕzHBIbyէ+=>O)}W.`$~@S-֫.!B ׻;8*I.S;ٹdl^q0'AlV2B%cK;AӨӖͅ,tfD.)y cӔZe>R/.5nkm/?~2{]6W_=?|['pK՟T~tt|\}`hFB@.! B%`vm#)_볿dNJc7Dj$8F>E2?s~P6vfi~d`j VS7698MW'/GCN3:Xs|h\! e *sYȑ9#'{ _c{ʖ-gU4o! \0TRC` yJ^row zd-Ie-QIP4jR5ju,H9(I R9];K_;cfShyda jkrIRjTNWsNI%7 !|.-g A,|v3' gU^9.B@8"I]D`?|HSva^|rsoPUIKeBtag;es-s#_$],N FɡI3ԜQ r |'% $xw/mDeE1}3$gVWp]g*rl?_yyy;_/f@B2A*2rxT{ܘ3l1=Y~R3jLxxVCÂ&As-z(;wOc l=?:q[*-msrV d{./^H80wM ꛯPZ~d8|߇]yn:t 011aY~[l߾VB@@@R? B^zuar/V{8Z%R%=jZ94Cd]#f8C8 [qNRҔKKj6.-Iwj[2gJ4:M (q5\:A#yx{.Y"ocS_Arf522\lذ:_@h&,H$K.I*'޵< ?`Uȳ!U&}Ң,Iom% 1o^/X>+Ozir U,=wVd&Pa['Euy8#c w珔m>Xwqm3>>Q~]oYk5p5ŕsrLIYn"ppx] Erć75868Zo@Aq{kmCx"=/su,-aB5dzRDfk GEh;ɶ)h`bcR<\4 O^7Q! 
#bN;Y E!0]ՑEJY$ z 3OR6X.S' $AW>fid.Y;Qss4/ޠ6aKJdӉbXͰGf O' hBӻZtfnUgL&B+PvV9s# 5g㽇$IƂrAol5&HNɑfX3A!@E5zombHGAX6p ~ӤG\n٘j j62K!FVڿD=e@IDAT.B[ʶ[ʯeW<Ix2.ǦڳjS5$##;iRzfR;W+8&aٸaIiʌ& ؙDicQ6̋i:1Y@vNҋؐ[*^{Sܵcgٻw9n|ӷ~]yVXB@ ;!Jgm }`Lrt-ژf]MdC1}g?N n=ǹ!#j G&Yc$d!VWgʵI\¥gvԤdurɟ:%u9s{c `XNB1}]#kߋ,F<;vW5WXvܝȑkoyC!{n5~zXr)w\qPщBAU8W&AC>R_@3m" ne>'$r!NZ$c$rA<@uis [>kD[X8RtaF ?묥/媫-ؿx{ϼ<,D]:B@!h>yJu`UjF#.} SAx(9 vWApByj4m N BvZ8kj;}.GBѼo1Sf jtؙsGAt_6?O43#7WOr'XXKo|g;09td*+ +>rQy4/NWzOn=z$I(TQMʎ,ԂtsAoR-GkM9\&JA0>OĥJr8%[Vp>ʼҤ/9]'&&U_}y罙)մ'){N^FB@!PyR'C>\Uw`jl](vmMg5U>? ƍЧJK|R$}|MH?O56-4F#9A 2h@dB#1nLoHZ&[aA<kfXX]dlznnu^^ldGyH??:eyk_\ܸB@ND(Wyլz ^j=+OM;wJwn`'ΐ0nq`t< YzGH@ nu <;5ja]S@P4Q-RDfFDiXIvo4Ͳüe{օz߽ʟ?O}Sզ{Þ=&>_@KCB@!PS \H>ap!=SZ$0r:g~ɆLFDTFNg]KbG\̭xܩ5IbgIY:W@d.}f(C0}^*u֘TTy4=ue+V&䫁|>u~n?a)XP,! R>_S#pCW&+cx'%PSd5'I򅶹R$Q%3%41rssD|!F؟k+m Qd3\fEi'u?%'YʟߗnvSkg[ (B@!P.+ &xpTy+=*B`"`ISdJ')$BO0Tn=~s33MnNCNtưiCRپ5}XZ (m=P">ڝq"jSʃմhiowؚ/RTKgi ~Y0|G7LD()XD(d9e=.wTN8Xbgní[  㑑+GURPti.y['$D"K7>#Il4y Fڤ2da[4_vzʔ6r<mkM6~+fϫg㽓1,X5掭G[vܝ3R_ͅs3_AB{Pv[yh_y`_ٍwUZi!0qSIՌv7 =G%plIS=Y2II0RE kfAj2VVSK1^uDHFvJn e2ڤE) oF,-j>0jJf<8Gg˟|]{3ؚ|xPh! "@U>"0K PЉQ[°r%*|Gꁩ2VXOy#rU1MRflqhBُM!,tΉ"&e_|0DtY,s'-̄AWkf #0J s󇎗*+]ugD(WIQ>B@%@@r @ !ląFxfm/گ++KY0:Xn/\A4{ lkaH$A('j+'cdJ:&C$rv|JM7]nžpIƗkǠŒ\27ƃ6&;?˅W6nP/~ a$BпvƔ_prau3W!gδfpي zM|$!a?2ʤޡ5W%v戡WI,<051P?` c& Е~ԬR:1峚JRIיK=`?rDrӟ~  \ʄ]?W|7:t\/|* ! @! Bc'L eVK~R\m<$a$_yps3[Td@ٷl07A"2kms573SA9'ue/fI4XCBچ>HpW `CD/›Gs̏d9/)I=<*$\]\o}G/O^+B9/:B@Ι2B`᭱$ yH-3]&5V&OEBm  X-皘59p [MӽY8ɕ3h4]q>#!kמ+Ldf{d/WpҲq-CGg 7ᙫp/躹%B@C@+=v”X, =\w r lp~Vt%=4Wy>8IX.2M?YX.YC9$y* =S#Kh%\F\͎,0`f繐4t}wϰ O|u9\y|&?W\V.28wqB?~o Ok׼ dB@!P.5'V88Yxwd$h.SKu/"5H{V#W.0ld-o5?͵ ( k$& 槥<ͽwԛ?nyEߢPn INoJSAǠv> ֍wЖ-)ϸ| f"κ w}{_9gn nys@ (^?oJHFۜ WL9Zѳc屉`Z˒ɪ2/>3ii7ɑ3A(cnI6f@lR1I$c%Ѷ.MU{42f9#ױGʆy姞ll?nVV##øCeݺYc! 
@o!:_V, #ҼӏdqqMKh^\/Kr 2/gjyZ%ˍVNb?LI$V(-[Q7/8F,y;*S({ Qn $RnߩK=fl1VTo/*Ox"t3Xd{{Z޲snvk9vl|U/25B7h{3e-2!0Q/F~LF0g:hDJq4mx1 @GdL9Avɦ$t_aXϸQkuҞ3| H^7V֭)"9˿0SB@ ZJCđ o!l5ۯ?Sg̟Juwl52uZ)i zn*O= SL{K:"dGq{ :isu?.ë;j@zI1SK;GXYgZPpw \0<M\.KHG_ws,CCF! @!{)]!ЋpYchOTx9M9iF;Etmc+pN@ANrDqz}c/r$u$'^;Gm<+)cm&Lc'8Y쳓Pg#\hmH%-J3 n7c1˔LX<Ԍަz''r\et"?f`I_<@V97ˬ8z|#.X׀0-_Wor'Ǝ/t{y 25BsEGiZ3B& |i\s}0|ΎCI#M?AP8YpH|z(~dǙztӦCiGe'K/OG'AA|6VQ9JB${U?O)Ex}ȖᾲA<0oxdI|cbS`(C&@pq0臸2cPLRi3_o osjz1$X낕QYi33XB]d '̓?h阉egfy7ᳲlLA&G ȤE@uہɞ&O{%B@0嵇ORB7 Rcb"\{ĉ%&Jm8'C1](ys{42i&k3tj[Errng okjdPh8zr0B@!P.# %BOWL xOl0X^m)đFP;L=־H)Yt3aq IuZ0<~R Ǯj g~ic0>uqT "$bѩi# "BL By&PWL! V5(vl݋$*mX>ɍ,=~y Mq4e!.Q2Hԡgh6%%6U '.̼m!㤮& IDc6ю^UB@)D(+CVc?VgN1#&JWn6D&T$"78уIΈPrMڴGf~caE=fT8TMfOal^mYxSo5qxq[{}1UB@!P TB@y|]xc N$|^X6ՙpP0_q8I(i3*=7+MvȹF\Gq:yˈNƯ't1KZ=[NWT)lxeQ/ӟ/B@,?"ˏ" !J8{!yg'* ; Bʾ3ԑb-NIԌF=+*p{o5_IM J#UnPs eg߃ #Kͱ,A& B`eOC!Bދ9ʩ!܁*o{Z "">IƬf;#( )lTRM`^l,O#3$8|$4V}WNݕd,Sj3>( ,B`\vP^E$~:^& ?W`\q<~M:IT؇ʿ\G@%v 3j]4im+Ed8̘~(ǤC ^eth]Ta(92U"B`\vP^BbVCrzKXY q6M̐~{.Sxnr,^z*ijj)_>UJ"ˋhB@u_.=2@&Q[H/=g)}'QNp *ɟN֜a8I`޽Vg2'5R˨f\Xԡ)?,FPI,.[e~6B{uNxu!Tbj!HG! X^D(oEB`#@"$wl"{+ Hg"'(6yVlΓL`y싊Ig$p۪6DZ ̸^SeY& g:`M{& MN̲~^&+1檅B@tʮCB@|az'!0d^B=(vNJy# jG{#(Z77?uI8nʅo8 #ݱm,1\UM.Ŭtf13 'tҞɩB{+~ n=H(ˆA@B@DÕ `ڝ%$Iҩ'(m$vŬO1MF"@6 ֐SG1=bx NV-wtcio]c@{8ML˝2|Jv!*B@!|P.֊$ B`bEYI ؈+DH=vf2'\t%wUwM7[u2rKJ݌FZ6.rS}Ȏf\8^2z ͆f\ڭȺ%_ۊB@! B5hXI{FQ\ǵJLut5]OhHـ)s!}'3 S> HDWHc8]Gmധv,W(s1gC㜢F bw.'CB@# Bu@_@ǵ\(%5IVC,;)ĊJ؟#95ub3M7Q%3JG[LgDszN—X&fOjx9UٔB@!-D( !" ;ރAe='|5:3e̙7↾Ύv$iF*i,iU϶;Kx9QLKrU о$m5M c7SӠ^8(˃(B@,3c ;$!^x݄g>__.Z_V󝧜:NĜy4'NқהN&TRFE;`D0}Pj#I;{͐f~6\3/Q<<ސ~ 0X;Þ>5Hus"B[tne,B@HVrf&<[hfA_ $](R-Ȱ&BqU'av:$>H{kEfa&Jꅢ+$RdXv8V'Z+۫擞Cn%н`) BKPv XBE=X_D@/oUx'%#e>W%Wc$|̜q?̇!L:';!>MCg~ ˡΕC[Btfh'rȆ]%CSBDE! @X= B` @"y7nk}DV$Wy&< lNCY-ZČī0[ErɥɍUg"#u}~> B~=]ɉYLLMf%qd| njӐ-~K#5Zm-@B@! B5hXDM$ERzE̗۫JIFnD?"F:\LEzw3䭰φiCL8:>uK9zv#xUHLCƮ2*ׂ8x}%5B{Pv[yB` #sERDr 킋no3_<{_!lHv-#f|da1eICC5Pռ3N:ak&nwAJҌѦM˾,8mLārt9]=b<#as˫bqݥ*ěv\7ěnV`4kkG6R! 
&"DWX4I$wIE÷@j,/6R+ŋLGnf[$^BfҰkBajOi.m8zh3]Uc@95fEewmz}QB@." BEpZ#pޅvD$ٙ$pp|6㽔F&9X2فD}|`[!1l>>b}}U&szGPic0;9h7s1y5oz|byB@tʮ*B@,p[I:޺^R @&d3+[WW%叔XC5er]/_cڛ{'wZ$qLlD䯥feXIjѦOeDXƌ?Ej.;,+plPC! ! BD@ʍC W$wVaw Apt.Ɇ9-Ȍ4I 'o~`o G ʭ $zϿ.$K?tGZ m>'XqR31O=>$V-@@ʧs" G"9':!܌$ʕ"Y[$ ,IW>·T^Je|ęNuBnE3]yMN$]Y f !p"xc~lA:'DܚI07{9#y+R 2- " IC!_47[ ^kZq5YegsdbÞ ÑsM.t7Kf6&u\ ! i(s! f#,J$7yDr2.gkJ ? nzSLp9b޲C4"x3T(jsՆi lc6"I*sur=L#jF.$MZ9 pa*5ޣc3 ! @7)(S "$a<+)":~IsrǍFJ_p%7Ă5tl(' f9]-w 7&tЋNrsʓ CHl%3rI2JC<B@,!"K\ "y'n "9WkU4merXtg+vF@d,WZ:ychV alb/s2؏ݓ>K6dѵ n~$d @" s6-|Z4yCl<9OI]Ge0-~H'#zی'b3onֈ,0kJ!*B@!P.5'|onڊg$5054Ez`oIsב]սBM9q\UJ0RI#/BXH|}.)ulh{L@i{ c}QǍ"B@!P.-&V5GO|$7y .*A |_$_^Lce~+hN-_II撹2N7PUcIxk 6b2FEM{' "WXB}wsKylGB@C@r鰔'!j8vH39Yz g ؟5W[_%Q4Fj,82k39"!4'ĝ6Iv KIn*I ݳǶeB tDu-ɹȥh! R" BhʗXeyD>|H;t,= ܳ(5 %!32}.$|ll=Huʫ9 ^犕B1Rf0'ak6$"?$eGXpṕh"Bt*B@Nx;>\1V/,* ʥ˅1ܚmDp:W^"F"h:A7$ikJߟI5 Tω,cKP̩~dEH%ۖaвrߑrxx0@O*B@! Nqp8ȃXQH> wD&֏dz327yCM' sՊ6ӿ{2>:YM/[AB@ Š%]!]xNRdr3f\K|u.H]b cn٤0Wي ML>ϭ7j.֦#5M,&h.Bf7Da>ӽ?Fn=y*Hj! )# ByPy&pwl({ꏼj<)5­#Y >kNZ4,8UQ/d_y+|Pfx~sSOS3g~9B_Nr-k܇,:Ig6[NV[J@ ^^UB@"AƊ$_i3sz%厮kܜ (EV_ICh -ZDҽP캱5kgNcS4駲V cL?ߑCڜgّD! ZX Vg ;"y7c~$B|zjrSprF(G$5 СfY6LImY )sbJ63ùJRx +OȆ,:5+9\unn j! 8ED(O8 D;ރ#I&I*UB?\Z*y-6aqFl '_ aI;'U`4}նn=Oז};4Bj<2hwG:ij !  By*F, yV#c9g i(D#pV&U"Y_u54`d*IVѲU@cs$f&HtNhKmni``>xPSEs Դbq,C=k%o1;6Uv)BT<d#7;;&ƱT+.FՈ7Pj {Wl ]/FHH2Cdo:/MzX!ٞYL3'ĵSHD/ssmqG&57Em5B`P.3Y @Hɍv Jh9]e?P+BTF#Ke/I%JThX`aߨfIw1]J%Je2Ҟ[Œ/9DB@8d"1\y>#yk+oqK _k :_joΓ~34id͞ykffs!0ud"FVNJM8bw,cޟ: ! \t H~.6sU*'GP2" eN$k0à h6IYL=#F &Id%2bȦjst%6d|н:MVo?{"B@,c& !pB.7yD*B`)|nRea\i<~t 5c_ 37*t&đ J) rn?A7ɞ3~0?F\ɂeB XYIbXs2cr`tB@!HD( ԅ F;U 9 $UeqUJ@9٢O>/zl^R=%*u /V.{hSLbuId۰1Oj!  A@r!(IG"D{+;&|" 2K) lAH ɜ9K8޶_gk:1B;#-N00Muik9V@2Beڛ*6-Gt۫B@,],- "H>Z"' 5'L⭊1n 0_m$X$``g'yNu=Jj_&yx ު\0s u TKIZySon j ! \0TR'Gh@IDAT$G[GXpح/_[ ! 8)"'H k1\Q\܏g%j-L\s\\\st@y&ٓ ?rX#pH\nɉnw5FHLıY'SnON25g:b-xi"9\?3`!|פʙEUxP36fpH*3Z'żBՈ]ˉ;5=<,&%4KJ{D2&;)UB@ ʓ!EV ;$F;={JCke+z [BIed}Mns&h[/dm$^mVٟIwGhT96Jj ! |P·=gwDޏU#~ՓsQk X5`^}B8W'? 
9i3y"a؂3lА G$Y8̖뻎l5gT[EJ*B@! Nh ȉ GroL$~^fߴf$eaX;vlp`HpLM@9!pC9N䐜 Ďĉ%kFii4Lwum^u2Sߝzwkj]\;|Iif Lgx&MCֺ~rrܝODJ/)Otٺ]Q -j)JC^̬H ÕujC0 X/zJ"F')煥{XW@(-ei_>>RΏ|NG[$&m+ɬִh<^*NjxX|`e>{}gM >C 1X^y1!^ I>!T.5.4 5YW@Xrm.fe"x$Yyk/e+kD+e~9B4 @S兙ono.J/tjq&,-<_3(׋=gqQ jCvb0cI>${ Ɩ`:J#  C2!)[t}%\qF>%k$/oEOk P1yB ͸k)cԧ3ކh6SRd(҇ūP&V""RK_OjgPr|Ǝ$'7 P>|)ߘgevz@!0 + xцA6$6z/[ IS!<ͮ?^XwE*i"= 23vVZ=VI=K^0 U-RȬ((l{WGXA $gbR5!%͒C1Zeyh|v,x&m^IY @k@Pͅ]$G9G(P &GMt{x):U3fW8мB2aO^҄f$/ &rC bX ~"@ (%>?;˞ڶbrog LUݩUazKc¾LH h83(u􃸋U1zZT FL;& f > uEN r@CC8xi]rAȝ&O};E`R9;{yѦ:Tg S! ^-X*4m^y!b0ԥ˃Ia$ScPЊRA/V^Nuʖ}3@XMA 1;@`I<#|E>@N@`Js4.߆*.!VJR/ bkM2h^UIt(dބͰh_i5zI^,&Qz>BjLF-洞)BrVHI=/b';BH#{&LyJL )"1ӊR(@y jQ<>CDdR$YR=:u^ @+ (Wy+ iO\ȑ3l/Ǚ暭AAk^{iG63ѧB/=D%.`M+&/IOXDۅhf1dK6 L1eeb%bbNik` (p0Jr^G>isrطQ@Y=1A_8P31J)WaO_B+WC<0>_oUcn% Xln|yu68@FA`Q3GQ3e@`w U19^/j.wc'QaI ɄE-g3Nio"-?֡F R,.hVUZ%C۬>{`_ q@wT'eJHD@e bPlN[kC\4Dբz5,5i3lf//h!>@ró`eX %QYݧpw_Ȋ+ xJ D@eE=CDlGAL/[K߻5}N4V~fL^LYDIKT!g*{K{o!i6n+KYb:J%C 4e&t].^~KZ<^ʤ`m|kҋ˘#}*cjBt) PRʒvtkBjE!px%wPN=*G* 7Rgȼ:4a@C=/@'B/v*} A%oIC93E^َ @FAaf#B.C䇀~֝\ɺIBy 쫎Wh. Bm(t}uN)gU^,GHAh\Sy;1L[&fc*1r|A߈x#l@+Edә0@"&I^aېi6/ U^YS)&XAO[{K ѫy&OJdj^bT]v1<$LJ1+ ߊJ:Wqr59d֩;1QIw)vlYĤQi]P^إ?g>m %+${*tI THJ7 زtF_.4x %MNt. 
!uy齃lo 9ilD Z3 Ѽ({ͩ5] *-}f/oXo8p|0 ',3)._+ o "br(r1W&pFv=BN"tc(̦j# |}LU$udx禉PpOďVb&P_pv iq(PҦ󛹾h鳶z/^ʿt@(1e]:%PVΡrؔGt}'Ygdqw3&30ow4Wf6P/mg*$v:.ħ[j/&FC~ʪZaf(/tzbgl(Q"(=W^!̣}/ 0P'll*2U+Wv1>uI\j4+{^y7Wgʌ(4 E{m%ާb* , U-$,ڦYdCJIAYaFWM6^ۗ/+*,ENU)@E~q)JV';ͬL2j-4+( S[#RNޣnMQ:j/i&~f{- c[$j5H|w|J$^ ,mt;kfʔZVk_r1+jl*7}-vQZVMd)&B{߶{!c(˿A9.]jB0K\'3Qc?to)@(+eYG֯:q:z8) d:m*6!ߴo\\< x~Ϟs:JAgeTfQLb~f OJP(0I 6lpՂ5L i%&R~Qۥ3x{L @$,尗?ݔ/L>or*Oʗ(M(EnQSͺ7Sz*: tF`L!Z{^AAo6^ Nr.߲P/)W}j_%י* @||Ƽ=֯=ѽm|;`ѯM6V`iQ+2𠬙$@`p݄{x>s~v=W*)>/I^ÉJ @|viB4NT.LEf(3'ibn-?u9!JA(eƛn~IޝvoUpZMӦJ>NqoaTB %|ggs>{Z~ Ygz2ёT% 9媢D\X c,*DtڃOuF;j ?"-n-aڤ+ 2`kG>o;~Ŧ6MR%luVV7W݄$[v&= Z)r|bM7DBmj'lY ^`e4FXfՆcRI­D5h_sLJ-^ |cN3B@fS\q*mOۜ=n#2ukʧ$$^_LqNhZJ}CZiAr #+( nbt׋&/vWSU46z1]Fd;-?SQ=NRUYofZ_dIbdekF|BT85$Ml_SVou݅;"R @\Zjv~|QI?=QAMkh[Gf_Jc~G_G5]$[W]gAek"*%AtMʛxq}B SDkwܪMz*%HuFAeȿ!z1B_WshdRc]T٥_|0% EK*HMS= R@Pb$ 6 =eK:虔5񪫈uۂAϥť%E>(^X@ J">-oGmR>G k M^~x @Ra NLxWjtd-rPSUѩ3Ϡ >}xL$j>I+6~*f0 YV&j<$YR$>4ו͜@(eBx wuSCPqQGO6D]J =/8%Rس='5"[k]*-5W{4%.}}#GDT @\oz @LuLu)ٜ/XѦNMT(M&4%|"cA-3)&&ZȮ{1:wC@(4eA@ Cn`0kJd$xZhTzgSTM- 7bLE3Uğ>mbI1WgHoADAYѦ 5d?-r\vy{)Ĥ ;}0EuiqE_WsTZfJTe&]> ͳBZszB[}&@@Y (22E'mR/LIMRtmcb[8lO{(5 W叙HGX4q^F3MR^ ,#M?!@ sp}ˍ6z~p;0T_&S(CisL痧xe^Hy|7Hgkf4@Pn+^  O]qMzT>.ut%Ϋ"2D/4+v**GC/M}n5c?dg+?؆'@(eƔA@~?saW t:=7wwTQe\RFiiL38}Q_60ʖ,&j-s|eiʹc$W@(8eA@>\׬W{L}M/{ )fQ4KGe7(PuE*Ʊx'eLAYѥo +pxkN}MzTy^Ei|'qg.Ɂe>8>${@%,1@#eݾaMt ad;ID6F &>jHt&ђ($MXɌu9>$  B@Pzx ?}A^5$R)B.zmHtQ%N3%udGf,V|赔iL{5N@(8eA@?s!ۤgYvz=4U(ZNyUc&.:T`y)yMWJ^W2iy'UHZh@pJbE{>[4IZf6U7 @(,eaA@QWtUgS_(1SA7Qoe|:/ U8i1.1p> *joܟY:u DAY+ PL~kzڝk誉DozUOa"UǙb^lF)x3 1JTj2^,1ԭC J @p*8`jQ"&EfTo A/TxSB(izo[ X}3x HAYAK PLw7^B^X=^/@AV PlԑZk^~;_wTB(eᆔA@ CzYu꽉M, 4M7/NxU!Z> CRfث`>gX!@$,)@(2 ]\UPe>Uҿ)E,^ƐnY cj\_||@ 9>$ @$,%@(>}1_vu{r6e"V!hh:vo^IgI,C>>)}Zeĺu \컹n  2cH @^zrkwET`p%*J04ibӼ11LG-XWX?b/*^#UEAY7 P2o:>^q=(T̩3=0*~Dh5K53)PޣHTŨurބeZZhԗg:  XIo @~%Ƹ7~~*AEHS*)eY5&ԼBH&$>]TB@Q (2JMwtc&MaHP=>+HC2h\T{ :#ǜ^LB@ (> ܏䀫QbDPoN=/8B=*<* E饤2iQ>KL{M@! 
( 1t m{'L'y'1Fuiq_ZJ}2 EK^lF[ rBU (  @Pl@ M5!-D&(E2ɓ2h'T!* [\ﺖ?63p @P|i> @`%99^2z#jcބD* }c^Iu?_$uߒ|ސ'u=8n&@ ,0 @0h;9Y\bQEӄQ9yaDKBTxy8M}JW@!,@ @@@]/Ԯ_^0%DIiA=U KXIWb~(5 2" aQ?,qvфVNG@I&+C>ȋe^hܹ%R3$AeBF%2x*5a0ZD;+)y!i˘Lx)=@^!H @XӽHs=Uid4a7ªi6 Z\BBL{U&@ ,8 @~{tm]tbwX4iF>/'J :Z긇 v)l@6G`/ڛ[Zy#W~ePXmFFRaz_Nm%r?zWY6 @ CPfh0h  @`; 쫍mU6T`V"A-XZ"Ml`5^\EpŖ{s{ 3ʜ ͅ \ SSU2jBuʫbTbo)ht=*&c /}̸?zh2N) @ő˨8Md*ON̺]lMb֫UWU[h2NAy {xNh_uhy65ބV%R2y/[\꺾(KMl,~\~Ǎ5_  d2B @O1/ϸsk_&+x,zu75bRS#[yCo@P^=;rBrOQx W I%yHHy-K]ӱk֪r˲yP V~ Jc  ,@PfqTh @` {~ѹWWy;ޛLRNgq J5ehs.ۦݷNc$1q ]&z@@|BvY #k GO)K5ƹx)Y׆(<>>0jx ]'! lͻ_\iczI7=q]e:J5Tk!kl'#x+.vrwS; @ 3ڢ~)j(0uz)cUǝ#Җ\zC/D}Cy)+ .Z@@ _{)V@NK X-MzζSRK싧2&u{4YTX)ʸy-/洈=9^uЗ.{@XR @1/ϺE9cuZOm|Uek%&NoMU]6̈5G׉RFLA9b@("/t/>2܎_uu::mƙj)M:r1#{et8r;Ǝ2!r@P{= @`>y˛ߪa-QЫuP2!ޛ3Reϥ7f=+rrr"B5@P^5:(cZ2-%%*57[L@xa @(=YQsKߵ̢ WeƮum RI1jRUeLJNQhz.p|Ӥ6$ G_e}L6vuZ@Z#Dm^DZ`Z`OV=L[V*&  (ףC @B{ly RGD_:0PocSR^MҎk(qcX70pl' Pʲ4 l#'dGkӕu5[ꮯ o$:NNn~F9@AQ 8(;>p;^rVSm3:){@%r@@{~9Om~Fn>D 3)l,iV~SΔ,-@ (? =>vfamGssuZGRf1`ý]toeLJ6A! GJ @`> Ko(QnY jN5)zȌ6A"xZ@@ l8NoS) G{dӞ 9j@HEM@Q?;HFQfKx1}˔C!@( eQF~@rDv߽OΨ$FQƼWfKm՝i_w)^B%W@[".!@?_蹟~pƵt:a-w eȫG`C8w:nξOf@@ |i~9[G0vdewu 2/^8C^ty|ұleFB F/|e{]1='/elU͓1+ @Pfh( @~ٖͧ.ͮYKZʕonpoa±r%!,@Pfyth @DgqkfB]Dn5YcI <m {ϵVӕuwtcP7˺7|l1WҴ((R @#!>_}l}Py*&UTn6T6o/Wyoz>+ |@P{h= @p__i'}#۠\ߝ,Gl2ykbs|̽Ĕ{͑F @ S@舨Gܗ.yQX"2?;[yLѻݻnvj p@XOz@( _QgAT~Q#ج{zF-)Ӄ9]y}z:{@JA 61>̼ۤ&f=E G9^=ӁGd2cC @]dϜ]pe/,+"&IO4x^ܷ\to>>XZy oB[&222@ '-YVqnEXV*zHɪV,&D%zb@Pk<  @W 0# _2"7Lo""}@ (Ԃval_ݽ)wPί$@%(iR @%|G:z VʙxiDA%\C IDATPt=ZЃG{SnE7@A Щ lS+?|}D^{Ww2_d 0   色 KyLG,XxѦ{)`ieF@` (w55A Sr҈g[n[,ay9uڽxr-G!   
@` \o?\˵:126x}'X1D/ !@@ _?~f}Z tɪSxF@IAt) B8'sa?,|b-DXV+c'ݷ^t8+ s H0R @e&Bώva0ܾo:;fPrR$ @$\}PE;϶B7hs&(<eᇘB % gE\\c*cc^?N!@%%,m@vβؙ K3,oEn%(eƜC !d.Gd?v9ʘ{Mkf,Mvr'(S @؀ŞS .7njY[y@v%@ ,8K@rBˇ:9ip3'J @;/tcB7]NNIvhlr @@@V˳.~mUjEjNj\IrC Z@Z~̢ΗE{I @PpP @$ '?+x/QؘrcFX@ Lh'mO]t̜Z8^ /ؘrcFX@ L.Oη\WDf^Njeh'6&ܘ @ oGe}g[r|K=^rqH[4@PnDt@ ϶ò#;Mz˦L%@#ߘb@ .'.܇DX7^.+Gx<5nx f (7K ;@ 3uo>dzȷ0 W|ki   (#  @1S tͼ @P6 @% [|l޳εzqs|̽{$@&:@ 0Rzl/,?^a^y)LWٰ'DMAY @Gkߖs,wϭe{k%) l@Pf{|h @Vݳ- 1ٰ ǚ-7M: TjR? @\w}Bt:l ˓UM  ʌ ̀ @Y pQU~̢sK=aY>1^w {~ @P @XEl>";~b-gPX~큺{-lسj䈀@P,oj @"pzggX٥v͆=TB:] @[%|}lgY^7dÞN$@;LA f:",T7NNU>]߬d]4E',?@  mݙˆ(G6y6ن!HIA&"!@6"{~Vv]a/,62߱;peM2vǘSQy (; @O\Xr̂$ǎd!nvMW@Pvh @Y%\}E7}aY { &{CAY @`G,w϶ܧ./w9ܾfʃu6塠@PpP @govwT-*_q4!,P@ M/tqϧeeoywǚe'*s`fU#ݐ`@ Ogo. ˓UC/|hu (354 Ş rn)}7Oo:ę5[ b# @#fvOX~ CAAq @뚰|x+ )'JFA5^XC l/tl*TÕWNNX]3[s.a@Jn (s;t4 Ӌ o5eڍb DLsV{b*5eC lEt7Xkv:e؛a 쬜ϟlkk:h=; P @4Cϯs,/wNX))?ŋ%\}vXL*S |Fք Px(2 +ueGV5C :?z۫j%;jZ˯9BED >d@ a*vbx,ѦMpFlėgܟ/ vҒ{r 15.//(dSg:i @` z@ Q#W"#"][Pݽ iYc⩌"zqT| @Pvh8 @Ft]G,_lo|͑gmJCvz^CwX7eF6B \3=ж)T;ݞS`Wdתet:eF6B #ⱼW(Nt_{q9v^>*-7MQ6 ; !@v㗻",ݗ.VX.u:.:UteC ",^p_[Gt~cVE7B,‰SbMPU\4$T4c( ?,Ä$&H$eMdc?7;i*۩5NXF ;@ʀ4X( 7ͧvb<șa`7mY\?rƒ(HgwDE@H``>4=n˛W0O0{7((1!;EaXRO0cvy +3˨ z y][[d$*~F1;NxoҢ8ǺPi8Y)%<BDH'2m*{sIS(!(pu0RC;Ur)B6uPU ԢF!Cd @_l<1N{຾]tq'Χc {BS}QD eCi_!w0O̥-yb a"Թ z R-C ! 
G] Ѷ,Q7՞LͫF(aRh Etρe !h([>4_y73^&Fz:[4_6n7?任٘ iCn䷓?g沁t\fK]_߭gu4&+RaRHV k1U(%¼xz"PMV \\Bɸ0vH֡ݨwPt6v Q!ZqIKPR^4(e*ĕUahsđh^5eQ!i R$?FJ+( }L!#*l+>2PKz-w/pages/351A2724-332E-4B28-9E25-ECAB6682F78A.json}rd(` qK@oj+;F:>'TEvlYC~wzp++_o}ޛÓ:WoϿ9ux&zM\4)e7kONޯauzp~tr}_>|;S7xԎ&~0$Nwnbs f?:p{rٻ;=i?[f?nL3"a$RJ&ߙ,3}R짮CoUG==,Wznuû󽯽\ytxn/uv⿘|𻓿1.?w}:ѿN}o+|폫k/xӏ:|1_?ꔎ{o.>x:;Sc_~9x]OW?>x9_܎Ơc3Hp&]|f3_uOe?N/9Z8[ϣCL??r*=ywp6?zwNO>QخM}rpzxgsS1 q&}ohh4>Q~JBjuĉ*j\y?so|vۣ]h.eO \7{?7//rOf} q"L kHYlG9y+q>n3D8dt!1&;%8yvOr|Xr:Zo*M?O8΅,!kx35}) M?֤ۧbcBMt2!XVcɱH׏[[Icņمˣ&6cE-9׃mԋ/C)"E TWɇ_LKk0o{cBG)n!O\=ۓ'7ûj$O{ju|SU+r8оYsnqvQJJg(VJq s@ %\>_'Ggo O\+k[s(SrwGk^Q}PRX(6Mtp]ftޫF,ףOx?1™6=nve3CrٱqZ|\L\#՟?;Yzwv-_Fݷi(6M{w>=bs7}|֤1W>9ģ;iBL)S7_akãB2AD{8Gn'L8g#YVo䣓~{B[ !2[Ђ4ȭ'n> |.> ,UXs4a_}Qg?#-H_JRVWZP2z~:>;lliRiCMR ~lCK K1]ugs<>6ZRA~Z: \ SNY!8i0yt1b϶2 aR/O@Vٻh{:-SJf3ya¡]P銛8+fT)d"q`lpH *&cӠ2qJ*J`O(G: }?IE!4Zt}ɌY@3ݫQ7 inxYP K_EBw5NZ\\|g>WEPh+E<lk>ۥ2d]ph!)H52KO>܉o}N8l,`ղx`*ժx^G̈́F׶ =~]ͬ\.; ]ph58 h/&$"̮MH3LLdܜ<%/,$p*<%:6$L,s,rL  $Oma}bX4,]Ej X@}`1x)&BȺ,z+נSvr յaX*iYf!neVQ[Kt@vY#00d+%VMQ6,g %׺xy% a$yQbslv:HGaŗ`\}"O`&s ݝxA4,u,/[C੕ P#N MyQϙ)EM? Eyձvu,?>[}j|i,f)A':HA7V!K68u\spXhr)!)u~"mp0gOiXd6&S ZeZNejf WMR kA(VV],ˬPh1yZ:`Yj1L`3rh'S<~eZΐ s(v+9sqQ`gC2^Xh-#!}R~J+bAle#iXfi|3~dJ邐씼& iSmmJ 8l8uXJ_32!И/ǭgB2 h;áHHXY Z1@_-TYZӨ Sٝ2#p ,6 A/k⹛M!؜N^j~a9,BQn]}%-O:)a5BkÅxf8896ix|dVksrr8]pz">}G'iCenL<]MsÓr`%,\Xk^)'i'y^rfJtDRT)P wd]w Z_E##:&Xl ̐􃷽P\s7XmνQhF#= !G4/>'NAA'l Xw\9bw Y^HC-G.9^ v}}ԚY*W&xR֡_#> 'go{c<9i|0Et|Hpx:C8iZLى7-k! 
E |$e3rhwX̅6£a8-ӏt%HUdY Cx&~%<22|I T#auD:wMqU CZ@ üVloɀ3[ZE`YpX, -oJ-fjD iD!eiP:61>И ׊hqI)Y;j\#;A/h"v)qwm1-|1PɊzOh/]U<(<N›;JLZSX048InF9K &ܲi0yn`( OOO(w/_Au 5/JӍ#0Y&i5.h'e#}lP!4FQՕX3r$Y'piFEYSC%pˆ2Bz³'Hf-sǪ{x Y@>OD@t ^c!AXe 0Q[Cœ1ySrNM0nm:~$g!yfmD3cڢ5Q byZ|79m.,:B= L'ahFT[*a0I -)0'UO!/we͌:u g,u Kp" z% _΄tRk5ƌ$PY!r_SB֘BW/o7ÊAs8ARLeQ[pԸf 7v*:&q5vN0g~nXWb!xm=-mL:1l0іi >TG%jof*{eunT)dy6-bR6x(9טɳJՈ7u~o%V|sl'tBZ8ty4&21 hy2q?WŽvyzFX5ȫ$gr+ǭXE'ȏ+ڝvıiҗ; Q|Gݘ]sQlb=D Jhb~9 3c\v#|f~%GR Յ+ \;4@J#1M |WLo<&q_MTJP+d+̵_wX#pmyqOnln_[o ډ@0yRI]pq*.┓1f{*>`hm ok 1֧<:\+~v"24Ή[Z?Ib?9F 6*ݬ3tNW՜V$Y 9ά u:E˳kSQ|A:䉽f_LL#4X%6V\/e+@-5ʆNKlCFOoT:ȳ&ixE/qJ)ZO`IőGfE_Z ԱZaBK;uVD8eL-4eM5.hlKN $kP4>!i Ixml `7 )Zi2;uMql 8Mv"g@-% ;Dk!gYZ8Y 0Gሿo~<;칚tMS! }տ\{`ԓ96~uӚטkO8*Yenzm0ǎ֑@)is_<~Ú{֟7 䲂Wc}Yj:?8u2>NiB7BlKlnd!*p ޱ§yyĒ}7BnKgTH]H'>W|Ya,ʙ[L-7brŲ<֮fYSxؠ"*,yltS¢dž8>QDiItvOR:LE$fHn|ɝ̘ :B~MPK23XQV@+G %c3 ;bSV%Y2c4\!'Zh`ۡJ+!Pb` h ͓jت6ZW;JZhuLsvrl[ܐr&-kqS5Ib ju6.qeQdYAJaS aNqTRȄ>b(mLlb#2mD:T^!(ŅLK3,@A7 e(#[a8CٍjQBe#歁Ⱦ̉)eq0mDxp Ut#3b) 9U,91nS<)GXay;5TBK&*bJPbӦvER)zP0_L9 B'ɷ6y6k.I"ĦL'lh:.?lЛ03HkߖV6`! 
c*!̓b"M y$QmZ%e,oA*PJ`D :H+IrT,)+uļ;ٮ9H)| ][e< ;7lQ avٱ4<O>f៑ .֎1{78)&8D5>vLo,lHfB2L0rVn޸;8aFY [skR}p"Loo>0]F싈5Tؕқ^lڑ/w_&rT?N}a\IKNcWdxU<LX6pb;?|gut E12bOiyۣ9lZWj@6r6HN+ĈH,[¾+}o8TB5rIS\zQLjO/xTofAƇ)I{8Eyg0SF7܍ zÛ_:8CƏlŰ}rS0vSC1n s XAzG4;Wn7q}uC3!I e{PyeDl?8s,zFriO$Cb^uDfAFVci/=SjeZ K&耮M.EH]*MZ1s }5mIbLʴl:@u&bvbnO>cEؼұYHQgmM۰ch :>Ck2DH-/+hV҄>C#3%F/7WP0;K ۨY h8n7cz~x*_ag_@FMR>LSiѱjUZ,eKu^Š%Jk֢ާ}Gu2?0 @&t"p~{|QP]I JMfƮR?P(#}wPסRt)Nq~*% 1qC'X LJ*\ody?mT1{ʞ^qח] P*\IMlw1H>~I))[g|[Or[I&ce>j p2yR~hLBfSGO$fBZ›V^w]-vYq0S Spn+v!qA3;7u؈-l:F5E0k(DžWZ<3eVYh`cNB@إJVHTBvPq~$pE"dׅAd*fm ^,+.:vY<U޷ke{e&Ƥe;RrK73t /ƶg |akVAQ ~Sɳy,1Kne`).Fɷ42RhmOن;OGt2gbQ,3  KflSz*p83@>Al`C!΀:`;DJ]\UD8^@IWZwu\uW"c/u[vy4a nt08ŭ$ 3"njQߘ77~e ]4b&6XSG>4P2ۋl6D&9Af3 T4?'ˆp{ISm.]Rpy>_ zt=]RD޽ЗB4 ѾN> 6ncz-8]eW2S~i83 φ}{M`;\0@L&< hgO3Becvi jaUlMQ_l׉$IegAN_'%>71:`z]bI)Mdm9 .[6?dp%=ش\QTH{@B%tu+WƅE-%]vka-R;4LT)AX dށh[ m1 0N@;޶KJ"[?F 6= r=8 ZXF9)cuySVoӎ >-OY.} Me˞R%> ѶgDv/ц!N ZL7zg8 qt҅8;I& N| 4ɓF7]:kr>(tGJ(Tp-)F3'j7$zgʞGǽ[uSI0o8y,1{el{X = `ȦK3Ty?at4! NO?xB;{ݤ`Q+׆sǮ`adrxS=_'q̄o\f!'Rbie]+MC_L7 q4}tƱt%w.=PK 2w:/pages/864684A8-AF25-4B08-8343-4B3FE43BE1F1.jsonks-W`L#~zswç}/n~gju,9h][Ǥn.?Wogxه?^| <ǟyOdz|??C~y0YP;~6CQ?çx;`޽>p(yo{{ǧtӭۛw>|qx?٣]{\w84LLV}٥\co+14)26a+1zYmug-7զ?߼=[O.- ?GX'Wmv=?nVe/df~áp庛պszJ_.?Ϸ|Ooaۈnm?}*a4/y®_uˠKoo?iCa8>f4ػۉG q)جF-bKrSxCt$LKgsXD?IK,.C3$gcf8p霕H݂A~ަ4 %Uz\ߝq|Ǔzj~3U\}d{ij*1[zzNȰhxԧogD1xvaJ('W}w`OOg08n-w?Yeju]G3Κfv}׍ݏz7SG#)'kBogSѹ4?:a@2&kZ}qCV?b>Eƞo`O_c_ߜm0~ˇQûW@ߵa)۞JM냔\DOWokZ\6Jp-1SL)kwZk-w8W^k+3A ,Fy9$6D8)6/h>Dwݍ~7n~˗._ N`~Bxo|<0W5&@ZVd[5GnRqI`.;6G,,1U󉗀M )>>e p

s0YK)aWȸ=&[',;o۠s3XL7~U)vGDSJoG'&?MpE?̡~裸JdD%ߙ~(-swtas6q-d p B^D.VzAN 9 ,VQN)G:dwh2Dt9{YC6b eXq(+Ѷup 1J=~q_X@͇-~[ `/!rc?dU 1KR\%\t]0M;s(8Lo؈qLaXmպLbZ0 jS"cӲ2)n"'rZePBJ2)a5Z<0 B ߙ(XlO . z+/@!H~q꺾~lb<ÛS{ݼEJT՛WB8Fu|'@`=yHz27.>|Bg s6? Kvs=^pg| ~NJmsBCNͼSM~: ?reލH,B7 +ѷǨ AeADޗT#SkQSŒ;_Ko|ڃǬ IQ42 pgm1^!HCg'Lcr#q3qό> ZE "GM}Q̓XY0:="*PPFv sDς]S5 @m N5xxuR%XvL /^_x|V ${?{ŭ2RnR0ǘhboy>O^3"lLsֹf/y+ywx9t3E*1q[.(Rq  ^ {,86َ]bMco~g&urǞwzr. iP>[_}=<֎D"Rbl.P#ItźW@\.lr^3LYq*G)u0HPBb >˜+EI|x"(~%DLG>b5#nL$\XZ2^;)f(XXv\p|,N, :I-'*)ac! )_4V\Eim*{Ga`&F0v<@~6K\R5Ez=QycێEz\/PzW6Cmkaq^p+-Du+ ^¾΃nUx@XՆn[j˙F9l!% L? FДz&5#.9?q0i$\bF]e*cWEC0oq'43_*WqbȒ?.|f\h7Y&"]M/WVWA-I99!,s ؁M+^+ 1Cr_Be y[K~{cD;,yP%n5i^Ƀ:b^S1kOBB'ςFikc+3*!O0 TJ9꘱meRZz`]x$̼"/Q6 `/J?,0KL-i_eo.25؊,lLD Ûiՙ%qGE|m=D:v pU"$*qGcG//k,( QêF,Մ捵QQ$چZYj_~K^BvV c<]$$FqeQn+5tZjǡ"]Z GD ϊO;ږZBa*,mʑ3%^?հvFT]/+9'zXGV.lnxվLGzP4v`xi@hFM3 nJCe}Hݕ$ ^sWE[:^H+gg>I5IQ^δQ۪8չc'L##8(KZ<1\u6-"1!K7zAX:sK.dErDZ#;g=,Ќ LIxk#@VZ/-ҹIs$7vf l0Fg,&DfЌz wd!p\ᚙXb1 z϶: 8ylκdapN^v M#tKZӲ\^QDs` {'j1K GaJ+ %&9opBEXIhv"G ,J*O-%xf)3Q 8WfS(L`2ybztHȅLlâ׊0rw7/Ҟ bԬg`X#/F% Nظ|!7K1f@c^2~paپ|arQPW8pYzw"J %qĖxD5mq"0uʀ%0MR郶x%.Z3 k ]b,NlVIq-T=[a]Q0?$2 aa)>6҆ɍ-ت=jKV e{Uo^1XZ^Bs\herogaUp 2<^ bx&ahpXT@[e[y>_-Vݹ!i8Z0bݚSXO@ΚV`iJ321O!"=r2'SPoGo}ꦐlRط9; tj(!%?^9i3ybL7@p]q;8,$}5TNXp^GV;2[$_z 9Od yq`[&Sg nDKzH*¿-?21aK-(Kgy?r翕Ċ\NMTĽJ/]dWUbEodVΫD!@z2z$(.&*`^puj%h2i)p.C4";Og~;IncnsWpϳ@&1~q8pp+P&Xy|Ċ޽ڍB9`Dnj] sg.\s#}0RVJc=R&Tt#q9\Z{ıfxq* }Eꋔ:6 ;YDF~+emZjE ;a%ٲ-kTՇ3!YɗRddp;F{%N%#]X, $`WckՀNq`ecRT2|HMύ"}P.KaҩesI'5^QGy4AC6MSN՛aIBL~YUֱ^i "Ɓ(in$*UkWK)4 a̰i(Ftb6_ l!Ohb$4iPX#i4h1N|H&WY_">aۀ fK˺٤ ե%6cK82OPGF?E]omwBǗ YHaG8V`V3:jҳvγNw σ>b將!qPo|;n,Go&^ vCLC[}fxg6hPKR%U=,wFcSCIkB,gH=1:!BG4-~c;1%wSʥp?|Zd`_jI`>!m A kTjzDG6 (^Ape[a {ײTXXC$V} "LJ V"˞OK5OeXj)զR` SaUCGl[NN1H` DY-gKlXSwl %I SDV [FwQ\ВЁ7nB=}PlI[o(*69 s@$J2}V͑#IeIo =̀J@[\Oe|q7cƷ'LJ:FHm![c HMjЬȅn +j/rgYxV2K0"nF(q0s}!Mx׳3*+6e_<7Ah/P)1;㧐z뛥~298 2jALO5ɠ!;3va}I7etaMӣQj]&u3{]6 2V ʗBZ=,9o޷jbk^gM`Ŗr  ѽ1]4K9I4̓e 
B`EG%^R_8]FL#.b-;]ȥVyurf=6϶^!cuIitk&_]G<0S59qT0u:_fO=F uLDoq RlP}NMR6F`cM:J+rLW$kzTF6ɐ =FNĞ)EYet- 7e ox-͌.qDL.2d2R%qXP*!.L9K*ZK 3Y$n+ 3taq,` v2;iJ\.>PEJ>хZX!Yu6c I 1f!,l1$Y%T;yv=zAkZ6ՅR@BF$Wxv!aFV;2 ,.$[CZtWc]B#ߍmsFvB*sժhGf\E@6j)imȰXH^\s@#0a؋D$2l_Jc=cZ99E,+(dvc_PڗSQ.*Mjb>^a)iK^/5fӺ$l$vuqKKHimuq29a.h]i0xIq ! 46|Φi7HJd7;23^e(sEZ, f1Ii#OWdǹԲPqrNV:\&)J Z4bVÓ:{zɗ4NgAfN1l4<ؽ$_ʙ#flhG/OuDK_dΎ/svt|,3a\9/AOat(J)44)پľQM7ٌA뱎2! %"phfCQo9bzQ-}wS,/tO0RB%`B 䪔xHZ+Blܑ/^.S0%<Z|ul9M )j9|Y LxK憙98<Sj=@l3& 'ؠ7jP/ۦRhT yHޒT.2&7bh齆9F luR UϺ) (w'@tW BW[*MVsuRݒi7Abtˑ>m3)Wꮮ<^˒PM,(Y ?5$q;#t$W2(XA7N@<`Bv$q؞%|6y$6>4h+|볠 D53֪qaY#ґb6%T# hiM8&5w@ O;˼VŬuP aŇSw#!#JmTc%& ilآJe]WV V̼L *</ΡvȬ-Z\KHJ8Vm1JB|F޾6#< y+UF"{sk#@H[3E? ]bk8:AR_<>3i&6Ycn ScQF㝟L |%|{ɓ;!qٕvh^+)H #?eYKK5sVZz×O] ZOy'mr<(Z;Lڍf376#c$5uv.uD+3!GArh5a{R9ӱjC1]_f3Hu:GKB]%^p U p`'ȃ-'jm^o=) VJƦo#[` Ft*!Œ0(]zR}dN1 *?q=3aI!Nʅպ:dRơh~f ~#"M*^7n0q\1T;to+v!2Gܝ45Dj0BĬ"NyC0mWMSufUep Y̵tg*Q(8:RWQgLj~Z B+įfjKiJN7v+>L^*Vu}dU2kMV ŋoVdvz{Ρ侀`Sb=vxd dfa2(9ˬҪ˅?lF6K(DZ/W8aY{iN:CI>3!jGw `MMI0SGc<ھv Pn bS8fX*W}(oV靜]%DxݰXB8Eu'Ms6豄sL}of/_Sy#J2'5,甧ؕ .ep N~̖'58&3hNX!u oA/dIP:I` m䑙oH)٘)v/B8|-zGb>nb`սcs{^Y@[m'foA `n1ҨvV'm"P)B!Qgk=%R?p-8wT@[,^!M8FBuErJCذ z׼)P#}bٲX =˺Hl=towHo'cL73at>f^zрi蒑Mڛ#~Ô{0DY{^99qet >#p2ܿ"؏'|* {_+ ъ{w_NBkR9bG)fboe //8 pZ3 ɓMB&rot/2L_I2D|J5(ЅѮ{&t]ޔ22WVWt:ѓ-*{x!^|a|8cuIB& s k8F̦:ҸŖx$w621kKrϤ=`TH2[®ic"k Ew~ c<" [4sQr6Jߩ` @;`!"@;KJ"[:Gٯ:`JKr,uRG{#X X]`ޔի5[|$'y'DCLKW;K%vSk)c)tQ{l2X`l.>ek+f t`ŕi]6Q68Bm d6).eaq%8υldfiw^RT$<@&wK%xJX]ЌMjZFNZz[ACJRHeԘTɻ^dd[ yHQ!btyԊe_ ǩޠpcpb=;|31d6ZJNಔUxv0١3pi2就"ܙ~zǬc /Y#(8,WzɮȦ$MIڟHT/Հ 3~8ߟXA{ d25n3\O мNR߬Ɛ!OVyp]&,/A]_^"!Yyr+;w /!5_4S9ZHFQ1}7$:.+xY'#3a^I`TFQi,_&appvfܞ cn_4[4/ZxB̲װf.2RlbF$Hظ#\04%xEWR,lG)Z$/Utr_FBᅰ52[1}&}m%G.v^YD4>1]; BP#5bTDR*Nlߓ砪PDOqzE'Q ಬPMFy+ P{ I D$Βޓj;,tU)j0q($*lGe֋QU7g{*Ж*eџ+J#+<W_(f !v2?QCe"*ue!bJU.z^ bedUH9]VH*TVs/l=HNLU38ފ,{d:3]-xpԈab$f&\Yj{Ll*^/z"KYFg@d(u=i'@@?S!AHv! 
LdhgՔ~T~ms-(m-'(ax0S/am?Ctk!o|z}+D% }ֹcɜ xM/xcUp~5̴f[ϻmgAmߢ1/ξ,^ΖW` F?[ct`{6xL{KV2?pSOn61eR@6# `uFkTc8 ɹ2:18pLwAhT9jNO#\dc'Ny27/o";yl%_cjl7Gn_ ž?OUcY3E-q,ī"*[FO5Q{-9΃FP BpԿdаTۦ{:֓zRlW.ې%L_ ljm.iyk{~H3ĂPLr\+{:O^y}-0S_묈ͽP; TIfb d$Q*{rٽ)=4t<]Rml\r*Iv$tZIGI3SO1KO/ l lJe2aÐ31q.쑆i 61>ݺJ}MH;"#,IN rv!ol;ҞoM>X[5u)22ŵ>,9SZ# R(R)̶M H .!Z2g˪j0}C'α-v5[֎2EZN 8YK +&j7rt>(D&SW<4L~F3; ^72(FoxE-KCN''4 Te8O8"/)Jv*Jqj@m%_{Bk$l8Fʣ&+ٸ<5]RglaxގNŠ{K} m7}O XKZGTۃT{Wv$. %yN$ "۱j Ebj䅨v "O;̞z; g e7zϬʙḏgƶ.z;lM3E,vD&$v& 8$;t䠴 5YarxN~['PH$*#0ӻS5 j`ΰ^}z-aZy"97'•`ٍ-l#;DEQ6Hw[Ed$d:-L=1"Il98 !8'ʼn aһ:[;-[" pWI!px! ;JfjTbb/$+&o]J$xˆɿ4Bsw94+dBb(urǞw"Bpi3(~Kïq8YR6]ir,,E̷q @ꄑV'_%rmUx5v@B,l26"< "Oe 2OUmW}X3qkʜ EގzwI1ˀ98wd_UT0'ɾ޳;@wuLS5qǒI4`:%l, %4eaQ֦zf$'۰#4(b,{LIVhog>8 wr#?_~C*%vj9͑Wܔd%Fi](O" u:ǦvnoЦņ nz:PeTVV"O ]`GdzQx3[iQ!pi8qF%)]tt_~8D$~Zؗ2&ZV0NMyyYwUU6rRYO2O;[r'{S+@هo^)bWt%W Z^~R8_>;}ӎ;YFIgl:!$=q&z84T媑K9AjWkWSR+Y`by}Yڟ|D@,5K!#f' hNν0Lóc>2/,Yx>?*maf19,eܖoۑ"f|- :bOEK׳#1m:|_Yɬ,lt"YMuTMY!RyҭڠZBYRv%c, zVfVJkIrc1'gk6$dp0]JبuLQ2yU}rd{̀-<+6xjK-3r ZCV``b6mPJ1+wFyDvRyl ࠼)dVޕ+f mM=+[2NFPmbr+uk7R;BQi|>47+S7Ya+6F-f )M >yAH'QY4U&$_Y`X(WߡʼnξFs\gjBm$XШ2HnbrX]!HӂsjVX)NEנ9z=DAIHE HR )Rog0O6Ois4h)vFz홥n1:cl#4=*;$ln&tzI /vf!ȅ䢜pDEbj;Rfe-Ǐ*N*8!ɨCذ/[8Ă_6 Z9?%Io$a8@Qfۮ7>?GarwUUOX%≰M #i uϭWN#'vCӿOxixz|޽ȋB=[r`Dԕ jXjf.\|+އLL=RCJG!ɤV!b BGv"qЩкBHOTDD`w-+gŶ"&ij0B6V5XA?9^֖Xw^c6!X-ے%U6u[$>3x)D%^î5RDq j`ecՖRR2|HMЇ1sƴ-KKta)e݊yϛ:Nua-L%Xb+ T]2ERY'^31A ̥[Xx A$vCټ\JW._reMsF.eޗ651 K!EZ c'ZfZ10I|n$Jk ۢ~­sPNxR)8gWai{Y!ڕiqx&hot-؃iLfͦ1`z7F8=s|fgP|a-aS `V_NFRٴm6SїYn@ *8ٸ-#N'pN`ўύlK] ΦZP>{ vM"LMl:辁|.4lU|v6V*_p 0V^lf?DKJrff7%ҋa}%Gr Յ\]T9z[zZuxE୲_9| jxTNAy'&tv [wUJy_UqP;jV v.H%$+.*BadYVvCGA]X@g˖Ba]ՆRLI ]uˎFλ:NB!ݾ;}W볓L"+0]ӍcOg 9V&i4@hS@EUcmRClk ޸ue#@WRȱFE1dG)`MU*ǁ%[d0w 62~O J+8q0 &;& XG>N&M匶 ύԑHrE5(‹ev ӗ}^Iy-KKSXC< $'|0] [X#~cstJAOC/oKpw3?A.Wk%ʲnl:l8!0E)~c*%/Sb:9 |ZdRSDjYV>!jq&T b@'Dn6 ~^#fl#;Ć`醵,u*֐Ine_9ylE[pa"ROlMKYTk(|VJ'6µS|-)'VjG 6?#HsY}5ZN^wl %3#VJ} [FIw[\YВKX7n0=}\"c1z2y`ʮM5T",P%b%TkVv'Ŀ߳}MiG1h\+W>@[M%\|q.b '{Jƀ,mo !jм^+8g2oYxV2 
0eMAg0s9-_#_kjՓFǛca@զBZoq'fg!iw8' TFHl!^o J5ˆA'&BȤ3`os'I4 yV bC4RͪjWx:V5)V\o$P޹\K5/d Ccљ5@"D?9#&$< 5K5 E U}pR%5k_)0!@:q_byB(sֱ`Z@1\!P@VSM8:9EAuuJ=^@H3=4ҳi6<: X1s8?Caf٣GJWN a(.ie=f5w֞˘5ckb\S5΀˚RkްPʖK`<54Mk3nmv,k4S&.֖+$)䤾MQRTU-n|B. O~?wLyTޓM NYA TfXu_pS[V@P ~M=?zwor/On nX3]o28 %=V’TyW"N͕ 7lO?S>QjOoci(/wVsXxz>>}!VӢm=׃%_9%#Ov§߮mSwa"fwu:۴g7R^g'IqnoI愇W6\@狌sB;7t"˕DDD]!2"񑡮i?ƘtsyoBHP6\""+Z9DyZH (5V|5kuP{(^%6y)oG +Btt ZGׅZIn<6DjAS;tʨ%BxW)%O0qzq5" 1YyI ,$ E"<>$A5aY&M[4qƝk6qAbO5hw\n{K0#.*f6oIƣBV+&[㥛Osxaa`ƪTRl~==)O|1\sÒzz[.ߒ3|=T#u}bZh:7'Ba3St*ږL S![ ۶0#JjuarCĨ{I+A8(|q;/|j LXQ]"oTaQu/o59xn\.KCi~O R)"${nuJX5SBý2GSb(''18Ri\*wlҔ yW-zI;;lˆ [2 ʨ8FN agU6|h 1aW|!^3zIG iҙ)wфI-FQ*@͂ʙaD#Dڃ_9<#yp b,j2p4dH?:l?x[B.UFWANwߖ֥սl⼮f{2kڪU?prݕM@V Zmuk7 @~}ӡz)>?s͋oQMyᮼ"B? CahX@B=%NADI +MuҠ=Xu#k%SrƔ婣_Gȁ*+nŶy%oF J23H;쁋Wt>錩J$VP$k^{z@}g' }Cxl-CQ&Jv CO#>>=Tз:_/>}sO]UuJxc[kUzp2"h=޸N2QoqEIq2[yV'*Obb٬nfjus'ƢX4ι;'6ǺLĴ.}1V^4w™zr9p$%Ԇb]Qpmxț=[5kY8/(nrSxjg(kG5{HzF^[5##]7zhv`QN9?qk˻&W)xG!R <^IsYDNҕaN_IUCGuX4in<ėkmgr9,?<|2|E88SܝZЭ/rq&rha#tF 5_ek'qL5(q(_Pc{. 
llꃔ-^[؆qԫd y9yhsD& %+ӄXpI8>E7D.DֺK؜a`-VՉLC=UBzU-cz/w[>cԤG9>u&4() NB٪3䓖\ڧ3bFMTIW Q|ucxXv`T+iPA`U#JqwCa=FIyZoΧ xP t2j .VC?ՓsU YGHɝ?x*CVO*dykshcn}^[֐@_Ǘ0.kqh%wagpO[%FX;5OiPڊZG@[!WỴagV 邒ţnx}#>t:wAZ:r& #";X=X.h=~*zHW-Zhti!pwMU[ٮ Za8َ(?ڀS}ĬymR:ĮwK5d C SŒsЅL٘u'pu~CYNF _;:\_?RTT&ˣ}W mlJdvV‚xt_~-x: xg|W5h9ARp&*j7^A-U]',ar4ܭ@Ew5tUunT:օ:BC4TXn!rVa/'>,la[)…1zқ I[chShuK.|9C[Yz0j5 J T*EV\RuWZ֭bZ8T ŗR(դ+A֞:IЦ~\1:M3h)1?.'=Q[}`_C'_:?O?U|(>ybSrο?%O?~~}{֏֦ʶ?*loϺ'S紘im.ݕYG @(D*BP:QO?} Tޗl<۴UKAsi4ACyRͫqJXL5SV簬d[nlvJiS2۴8KTsUfeN7]2JաusAwPu}w?>#ӭOHcf7inHlRb,۔l˘>wkW-Mls0mQ2e眧Y0oW$ZfZR6mr-nԦpgMzq&)]_[:?SّѾGmf mtKZ:reTcWמ{[}xf\!Rfsm w6]SmmӍMlɶ0@n&[31T:ԢvSH.f%2iAⴭRiiķ{7Bo m9eJ@IӢtd:[,?Te/V&)΋u_GR1SQ{SUt4z13cflNq"nM\vq5^Qr3!4Z{SsQz mU(^ <=^I3R0Q5}=nwRVYS DRH*R"'v pgmĐWӖyi0MNKO½i(clMyh6yZužXdqޮ_8a_4~xc$Y:bRiqqԗo}xi ߬Unr{CQ/"fC'ӴJlz8D:r~0Ң\izU$+6ZZ`cǙekhig0%^bnE}?f: N .J8븻 #Ukgkt!R@Xp&T8]>77&QEpfٹZe~<"yP䕃 ڠ:3:(MSX΂8']8*#@`yxzj;ENj@H:$flA٧3|83-).4k&u笘ĸGI>$K @g tQ6 e,< T D}CpTO^A$y)udb7$E;gj ZUG>^LZ8>v1p 'D<=/(M7vK7TmC\Lt]X(K*Zu1.3\AFt}Ɗ"_QDx3„ sfGFڍf7Dy¤:+ VGX⒰l{I5*ْ8H*jQϪH<&MeJT,#j?Q mWdCG Ew*_,rpu} hI&hB/ ȡRIE-v:}|nw[l$|.jYvBp_z YvZڦw?w\ 3K1uےV7ٹTRULLxs+,?f.Z"V K|3Jdul < Tl{24'dC6!vVi/[GVU"}_Y~dc۟~]ZN(:'<؁*WmӺ{@cT`Հ.ӐıJ:mv;{OV=`d@v0B۲ ;qtkAJ.wՖ-)G8[6S^Q!Wڶof2Imk4f3MKvvKz18eW%SFl}YW`\zk&pno]Ҹ"SF!##GWmΗtV+*7:@b /;Yr*@۱AMG@X:UT\iiDI/uP:F1ڪjRү.GǠ`vG\Т9x=А+y,d: {7*\%Fl 7+0*9-@e7tŋ#8Ve niz:`5xW5#2x Oh6F)eUU CP# axi,-紧QSbGW)BK]# 5"i3hIK7(HG]z.jg 뗠U_s:ݘmʘ:ɓv\ϸʭL=W]hE,Cc[}Ѣ ^BHWڜe6XpQ=\~s[Л=QIU;s\38SKY-R+\}hˍKc޵NTlzXt] wQg}Bn^nb2t|*ٍ;OS:ٚ)Y׊YD?PnZffb⒪v 9՜[my1,8wkrvf._=nR氝;0p(++f6m:HQ_\P x**`swq1vSբUS2넬 B!43g@U 5X{LG/誸N@{@QI^z n= s.ёzk@Z KQN[$R+ qړ.Mxy <u& ࢔ZG'h ǡadoYl7l7=( v|~fb&7zRǾu^2VDZ\Auh#*sdfzu4- [ uY4 y/572MAH _`P! 
IDb_nkVSX.k`n4vBMEGm4Oxv:ZOD_T !1 ~Ό$/> }5SteiKҚa5UGi6*f&ɇRl$e &ܾ-EG˓7|Ӷ.ˬC֖SAJ*zfn6'Rmrk,jqlk[SBŇabݧְ5C>w݋O$.lZ%fཉ91ٛ{ [Ki1v3coRI˶)$^ 2VgaRgjL9nNG` _D&޿7^RR~/yhPQFC_m5*b9gB#$avki)_ֹyyd+ LeTjjsnNjzI O ÔV6Ub+gٞ:ip -GޗL"ZV<}PYXh)OsrjI,cMF)⨷WXƏ~eUhuM)^#i1wBB\eKgVgG(g*(Nu%Ն)KnPr~:0Ei:5Tl'leX< 9CY?)Rjad7â$:<{8sI9@$Xeq5%/AuRn\XA['nQCꎳ67W!cpb 0$.~ tL8ЂJ#QWh~rG_Zv$|Pd@tZϊFׁTv=OEe@Z0"l&1P,*8ڎ =zD,'LzZ5|ͭ +I:T~[5zp}%Rc=J?rFT gQ.LU[k$AlXYulcE>˂>(#.)<(,ţH*{<`+Z<7 u R&P,ń f$id MM}Aȅţ:fE"+e>ѬqiscmUN@Zm !LQV&Š0 s:~JkT%/\AB Wݖθ1".%Kj&>հ䷰Kϋ4ag8/vi\8@Hhj݀t#Ku곒g'F|̠ҫV\~ ZzZ3k! ݇9LJӒWz[M&*1󬳨/X3^_ QPF+SLUms'?w0_~CŔ׽,DVz5 %hhD1A/U\J嫲ЇOMh->zt@?l mAoSNt@ʼnC}AѲKXWU5_sE֢ N+tş,DBы.MaDSPqsIA٤3#7A>èRK,RPTPE-!խY _Q?,jUD=+5v=&\(uW;zZ0Kp ~SzQ;X9lPEp~U umW= {0 ]Ŏ1kFÍ<6qzϵu;Ts79Q*%0Bյ۞kFj [gUh@#O/t*YR9LсIZ땆%.kObBUwbN'BMz⨒m%MF+RKV%~)KԀ ZuFD]ֿe%H:ןPjk¡B5jyrsevfiϘg`iYulo:%[ܝ="GΉ@aw3$]"z(|LMV6xZ./%Ƙ3MS (+oA}}0vL6aFhAUMٶM5^̔bf5ݒ2f߂D6Z}3@:h?X8*J%d3lvv-i>Oq))lϏA(j.:5s\ -.ӓ^LlO]j S{㗸bN5|`&XffE{kͼTkKV>*" 1"J'w-$^L*8ZLD9&!޵|׀Wzz:.\4$EN`*[=d'GE {*"ї{T9V yEy48<T PA mMܮp  {v8ёCgDUdETp;p45!ʞπXTyN@mW1JJ (rU6ȇFoԤ[xPu1:P̋s3,=#UkX%]Tؠd'u_EMOSa 5\jˢ.'%jH" 'CStZGݬ !5 N4C 9=hó,⪀3W-mc@9.j+(|(QwY2#Q3`c:E,ΪwmWG1o?{U~2UO?PռyKz*_e;ۻ_M돪qT_z?a|W;7GmwJ;AL2i!S&@%Ibm?_^3rR|ǿh"?L?=OGF7*9JJ'>>ziwS,ԟA`<{Ӕ7Sϋr1׹8-[{\"|[o'B)1jmڗ*amf_7=>Ofm36h~N[o>S!l ~ŨU9 ޿yL֔X~Zi9Xws|eMƵe6qkfsvJrmf֙ٵhfFR{n{-T۹λQṘ0뽆<׭ӀOW( z}waZ95T7rw/~ b?ή=VQ\"7w*{*i}Xgfj-} *iTZֲ>i$e2ԃ۾IA׺qÓnbȍco{wS :ն9޴;{ZGR >ыĿbc۲Lpuy5s1=2M @Vy?L(R+jN*ZfԬًm1 ELe}@Nd~rG̗֭UrꝔ~QHtz`4F]9uɍM [ЉCJYϗ XE̛U!Cb j|IK/U_a[QdevG 8bGbV_(YmN:}jZ8؃0ꡟ"Kzգ~%jg$Ig]uGa;#6 יyP[|(qD['pYsdU}0M< tRɗãFo%G&Nh%}lf5kPE#*;+"6@*ɩkfkr j8^z,[ Ň)t7tz9Px̣QN:*}N; Sl̩bSK;V/몽1㬤?Iq#!phYASG5\ .2t@[WM4U;`=ƾhFKr鲝栂Sf"& D=+eJ *l>`A!ZTaoηJ-#q C uGvtU?\eyoiBWV zi7~37a]4 uA"c ~թ8mz1ag1f-&3AA$O,p-4%]w,r#rڭzV*07 ڷa%oY4U;҂@MziIfʖɒS|t%`:Cikfi{uZb؛LQ=PSE+n?yw-7*u_zrU9*mSvސ7/WjKHϕ*u "~z1 F#ph*)bu' 5@E'¥T/TQ[Ye@9a.@NXsp00hCd[==| wHP s %-:WR[~p}A|PR-z8 2"q䮑SD"ip;4KRE LjwnᢚA&4H]+zQ›ΙYEw1lW C#~PJ+hjY+*_47P\aN~ 
$Ts'%UpK49ZHdwvVU:ߕ\n65RltX! 6!*%g8mfr~!4i-mt[H7=V\<ܬ?=> $͝@k?uBߛwgFZv_je9;u{{Nۗuz};__ic/rCvޟyCVYf EiM?UVeܳw7E3:L[bTn]p3ajT,JLn,D^32lBpē"b ?>P_$>DKݢGO6odjAVz5nNYk, "QBj_IJXkxͱSeV l*-kS>)2 }61ϑ vzZcG?˩/0` ᩃ_1*ԓ}1P-_ h7%]'rf8wmRK9~hcN(A/L?}h-Uhў]OO>bzEU%6h}ϪFC YϩimaIYws8ET\xerob}\FwcT, =$):XprSƙҖ 0ݳizfwI)W-㧕.DVڷ[UfKKYY/*!=.9G@9~SN+*یwlLc ͮ~q}~NYJbTT ̛OERfȳn6s5.~="@ )J꺭ݪ 3 I=^ XTmEEɥ_B"z{wdJxDZT֬;a}/}Ks+O@*{@%Qajl3|N\`,lBa[Mn(T x엘L8A  B5Kt]^? Az:vW>vсPG*9xt5@ikSXR(n)F2qn#ؑg >^5LFtÂ&ϲ i^hrU}*vkT!K&|1,JA(]^͗1rTyGOm9 64hYtYJ:i /"`" 9eЌq`lPvhK!U9i7Y-$^IP9X5~K[8՚-5%Brt$lk]Zrz;iqfeN㰿20@mR0ڤoxTmص[ -IRtF@S  ky}]mlƁ2%3%\ҜezR|p' a|CYWNx^K!)>Mpy]'4>7 (,4Q43|ʁ٦iYoE5 \̈u1KӬeVxsqOYsuLU`y;i-2x>&< 8뮞$NBܗCa1JpEJʵX.+krĤv  6KqU"J4NUePwQEfn%p`8dB8r 2zrbiG(Hyw  Ta;d5Pu)m!(NVE]Hy,WABh Ehh\5 :#w`Fo-ОBʼdK]D^gZ%n\fkJ NQ3sc_U.R}߾3PuCClC.Ѻp6 ^7Ӄ3 ֓ܮ =c=f%>4qq)+8 H52N(.bo>+:HcLM! \VӳrM׹ ~!RՀ %lVNFVI e%F8p8_ޠPM0 JU?IWELޠOeRpګiu ~H[-y}~6ԖӗqlZQin i ?-Y-l'@|R-ݠXKGi-X['(vr5zm}}}}[lzb iyZL؝kR[_櫣GSqA0~f54 I.#r1t_Uv:v*I4ذ>_1L/#黾%F)u]eHs8/EeFubڼy6ޖYF/fVQa4MkeO'~\|23f?o 2/ڗR 8q\2'SG b[ᤄ)B8p?rHJfɊ\l]D]6Sr- \KX!r?S9ꢧ%Q{*\*H z smH@&)4UAoԟzq@fH=G9U5iJqۣ\cLSgR7.A%yj{"d}2bPP$4ᜦrZIHgCkD`NK hu=:E"ڥ\;}qh'U]!lKBENz!oZ8:%-:z}6VxoN+[}$4Kl4Л6AW NS+];+ ] H)D:mj.؏lbȱĊZ P Chu٦o[:ܳģz$a `x0+V~CX >Kag*;sraUG*UrBQΟv[y}v_w<_憎'?o6ꫣwV_~ݽ*dO?SޗyjfYlbܲ:L;+q*{ߠMFȤqK괧dV%&[SKkߝDh~K,YtVERw@i0E.~|F{.Wޙ-ϫKf7S4&;}=}7j眶2N ]\ʢ3=>psf!XLi1Z*5ԞerLQl ̪0%uS8d&:3\}{[ΚMK7"5Sڗϛ21¶%3kwrLfW ߞ{ic洜)_fի}'oYlZ .:)۶2߇tQR+76C]Toub>9 V Uy%>XtYz8FIڭCOy_-[0aI,OP8\8P  {Mj-&0r_w_" /".1.[m4m1lF:s:#јs+X؇G ^1p . { ]s.< ǾqF$ v4MkĐ%'qb87ӀZWgfJ\Ue5o@6lsXk@u{N&m_i릚neE9Jyiڼb &͘SN5cO]s|*NfL=@80s 0DKjC5@_)ӻ~]\*fZ#ȓ5:!v"!˛bCO 般]+ N7J B9CVKx*2 :=YԾ?BSFFSj.. pro'%aAe=QtH[<Ӏ NڋSP; jPt46 .:f̃CO ;c;-O9sRA Rfn;8`q4-]ruPL>*;^:  v`Q[G_";r@ѮuQB )2~P:ioa;fL(| Ck-^:zML._Nz>GLPK \z#( P5ሾ LHI9E'#R~ t( J(H_-="D1T(~P43BCM% Aݧv/Kfkpѥో,uudMa)"Bܚ(*ŕW$|a6|kޠ5ՕKf1]5`hXZ\DRCK+MH_>hYIOͷl>}jɴ6]M^6;OzY6g7JYMkV-v-%Iы*IiNf׶1z[Q4)6M{}:;xhǠd. 
G%0 $h;VM@*SxK]O1.{V7[bUGbfg#(`aT,xMJ¸˪`PS/u l_/6]/}L|+6: SĺWyz,j$MJc,WA[;݊.UF6bHJ2@,CG. Qs}u]6CIrmj9nT@e65 5DK{x`>0Ay4.V_NA -;V726emzt5R B>Do" CTߕv|[=SU-K\\Ėek zr:+VTW}YBZW!/Bwx b!K&#=Ҧ)tݔڥ'pZ\ASԷڞ*JAMF…nΌ!霊N.e) TE}Vᕛkjzz"'ޔA1:ܖߜ/Kf]2Ӫ:f\ۦsw▐/W>smW^,[|B" n^Ւ fZⶬ>opfs \6u[H4ś~[AXmw%u4t^OO{}}f[c0 jy]9Z{d_ŧ7߿W~Fgxm_{3{emmJ:[i7-LS8=}qv_L7sY#,UaϽi*lBXfCLms(֯-r6φCpĭ"Z9>[f{%3RKp]G\87&,ugHW7m }|4=<*](x:9?\3W J 圼j$~KNA3/atW Wuҽ-`5M%49:Up-Z[ǏJ."v 'rb:9fAw!Is{(>P'Y ^U*5baҤ0bU.&-ǟ8S@7 b 9c/)`zwRnRE?גA5^8@۳ثVAmojsj̈b"#.QF9U;➣o tULOQr$7!m^*@S4pgmuߋpSMr% ]֥w0IU):>u~H MbѠ mwV͘Vf[1KMܼmvln-Mnlhju27$3ge.PzJ 04lt}.i^ZqG[$/wNpZt:v34Oh@2Vu__3U 55m[qwŗ7s7}/T%.oNq7,L-ʶ9tvO&7𝃙ݲFgں+.-ŲOb}"Yh'g>nfݔܾR4&P\,y'OIIx .[^+EtG>pI:^0M'Q[!ܐoS}eI҈q./!tU9T- 8CE\e r}QKrE3 q=yMTɫbF38#4(xYV e|F* g4}@8@={^;> I]8SsL68f%ph`>gS]97<9p (Jk*@K*g Zé,Ъڮ ~:,x8im{,n$?*50#͢kNfԥfK|`F.MkSn°B찄PUe-#_ZlhxWHhUO'BtͨXU ]^ůԆ2@7y1d>)Q37)GB˩o )tmʱ=&~%ľ:Ov][%e:EQJf稜r@*uac]Ꮱ.#'ՙ]$;R/UTpi'(DŽ^Oi5{ܬN^i%p 6{g$mD pL%[hh@lS$ffpbZNZ+Η[v}9WLE¹ !B B,[/qc!o$BR((so/ dS-{8]ۖz^Ȥ+=k4mD!5zD&} MӎpXcLqsӭ$9"!d'o/$$G;P;"=jew[dV+l)l 쎵/v%*fFئQ(a[r6xSMnoFaF^ ؽ4Qj1=2V $ɯ @#}m5X| VF13XP= ڭ >XبLYSWAk>₞u⏿₦ssAY z;5EsvG5bM^F *yʰ̐G +^gV}ҙ%$^Hsi=vPi: *䂪7!/,3T_MZ/Y ^X%vg]o0ir :)Zm#t=*bQ"_*}=#YYpuILp2ΘSo z)}>R2A ~fS-8;ת -۟ :up:],$NuF98gRkMmǻoj$µ1q/oMaf*0:2 }:ðmH.V; jZSWUCvlS|+Ԧw߷Ҳy8¤H~LjQ%S__cxX8Va=xDiȮhM ȠxrIi’/X׺Yia ,n~[*]o nNN]y臿1o*ܱ^n gפsǴ=I?Hc)!U4ĕTx7؞t0;E`׎I` g<2dE5bV[_84Q22+οP!6QC؂'G'k ޵n`u&T 6eq^tпy삔dllF84+>iϯn&VQV3&.bG7IK8 vO S4nJFWfrꍘ~x#E^tЋzA/:E A6$Mnuचk/gNDsqOOe&t⍭MkaYl=c g[oWKgEpW%"fx"0~#+ۀī>CFN<}Z,Ś3򱟁2Jˌń5) v}_SNCbc0`sMwn4$MCDPUW??4u5#:7quqӗą+X݉YOKr+MCG-%]ډ? 
C4 ̉dV"вk̏0D -L=L)|TTd_Gd&x{<Q%Cffm$XuA~/DDJ{ ̔$:6l](yu>JER1_qq;% ѭ#}w"iO" eCD +t4n+w,a{" eIԝڣᤶ8z>Q ?30 AŰZZUPJ>@EEqGZc:%.}Ed[̟u!c2BRҤ,ӶLm3gˆSK+aI 8F ~_r]I/#l/sf&4L:8"À eq?RVxJ#f-7jD$0dûf7^="䈳.jSٴu&](d x{Ti7֩xeӮr/-#X$M*l9k{Q,m3Ğm"_& _ gj`:5܂ͪg|A+4 VP[NCtA+)Yo-KrV%Huψx"ܠXG6 voyZAƆVXg'@+T' ( &tQ+K on)jEHJqk)r"1Q+@-lwh}Oa+n} >z NcHt[؊5> 6LX3s{8w[ 513MF/YE6e_-v"W1Kޠw؊=pR&1O}| [n.pX nnQ+\0pȟɭM*Q|\tOq+&wGfWQFVNrt†!lb+[igf~d|Os+65BO7ar[}N\4vJ~K3LX'ЋB_cj v#KgՃzF7iʩPVͶ߇-zᳶ%` w/hsb ksf5R'lHw{υl4]DL[qw-c+Z5pe`?uU2c})blOjOB R,wNjW7`ݍ^/lŅV\؊ [qa+.lş[A GQ+2@t Ãoҗo46Vְvԩ޻UmjLD4PZ2%7185qV?PK,L5J/pages/B526A7A8-90A3-4B4D-8209-B5381656AF0C.json}]sGr_Q5Qo=Ӷ" awvWv=ƒع#0tWWe:y_O>~|W~>՛-^.uynM[L|v1nW_է}Qr՟>_xӻ_ޜٯW߿^}nFg||{˻7?\]^ϫ?+>'Q^z{~O~-śOoZ?W/>=]cܼ?yw=ٿS_7w^? ||_|뇳~p^/}W{վ]pW/ィ}fOOju~:-x3O?=p6͘m11l8mȌxg5,zqImɹl8= hܕMRq%/Q|zpח+~zhm_vpo˫{#W`Z}x{Vm~nBׯ~:?_Vmﻛpf>&z>jtd5ZZ$ރ6eKXJ}ŕ7bb3 V_^|<Kwt3kO>Ǜw~45cYC5Nm=u 62k;C>kCZWAoRo6?\3/g-{闫? PV<%bG<ކs)dl]jz?]m'cbI(&"6%r8^n5xPDl.b=GׇM}0CfZYJܴwӟ}ۻ!woz?3[U2LJw_Ͼɷ򙑖 V_Zb)Au;.Z02S(ɃۓqX0BOXe;`m‡Ōǖ, \/Kd_BHMH0_>r0`)aW{r&. gD0O9TO%zd,x|cacN'mIZv޼~zA`=h\0 6Ѝsx2v $dq-ea^ 58? '( -6D1߳,R7qQłe1`\*i&Ĵ4FR[ Iݿ$,,~@8)>\#*!؂p(w|D v" :he0c•VBB|@8 D3RS)bZmX<5%X b(os8DX71`"xq 2-.a3Q_6r N{8f ìp5 ?)7y^^bhL)\՘;<#s@YStKpyϔBw0<25K1yfm~16"t%]ˁ $ox sTuP-O" eƚ~x?p ˀw.1콹h>L{kz7D69) iH(>+"^x7t;^=m^%e>;;G #$w3/ǎ L3~_X@P]阢sa|<1$ZnScJOf\+%!#k!Yyg.9X Rcoϖa?܏fM7]ѹq ڍ+ؽ8om2JmT3ߒ87# }Db ZT#(B06Ђ|p,X)̌m;zEDT0v:xX@Ws#>Ъ&Dh= "4 ܛN1 oST!.hFLSy'{(Oˇeb²-rqm\bav n%H2(3pAF@N(ɓS$U3ȘcRc:8'Rː1˰)8gcQ8lKƫ-,#Vf/xKzlē2kAqb[ e2eDonw[98Evrda@8%gOOr< FdC'76iM7C0Coyc Sv>vPe6e8 D[ld\( r!H)s"'y處9/y/+Zqxܚ[7pI1^R9fE;0]Ȏx]3*2rW^]svAd@2)um)d.b?:M.my\:}/Æ !SOgdØcncR G$ϰA/[Nf1>UcrK*.̨5~n3[>} Uq+)<_ch%v-0хibk$ ,=i`%+w bQӇLnK[92 Bp9"2Ú!n=`7Y2Lu<3__얗Lc3wF$b6j% \MQ+MOL6Ho]W hwmٺOm:*~c1хEQ`)Zjrj\0q[`+c$ ᳺ\쿢8hNCBإЎgA_<cB|yOk[ Y08->,4G_\WX913d*Y.s]R?81+p,=*&)&qBb"Wf DJZ/@<5p0ՋsY0![N沤2MOukNXCks{oV%M>rCJ6*fqϳG -imVJq,=Q?-w$%nq)TOE.,.KrZr_Xg:Kv%{7<K09 . 
IH6aQjHFl \gb}|[7;97L6 a+ LV\$hr =+6`Guqk_&&DڸW0£_0'D{B/Ѧz5y;QSfd߳GhjN[CY;" 4iYo54 ~28+3,|E5$۽d}"g@ 懐t+ք,q4=Q!;2 9 hX~+uK#Ho~Y3 FJ[xR_^p0N)| 춹:\gpD# Bab#dSϬ?_\A5+L ]M n% ڴ-&*kq 7қ[S|W ;uwHتoh2K8smAN !@ڃZV&[pVZG|K:\KS"r@rOFMO ,Ǚcr%xk/-2Uld=y]&)R%jf:y$SDjN@5Dcp-/5{.f 084XRk\ LF=TÕR~BѪ_8316zu'>>6]X8$H_a&8h&%^'*X1ϗbӑZ3G[jEIpVD&!F$)B^sMq2G`%đl,^^)eP.i ",i_0˪ (x~GjFaGjw 0,bp_uk]^ 4WM@Nd00݁E8e.YX"+~QOi@3ܾ66\`]Z.2Bqg(uEU!-<:+v4)z 脵1ȁP˙bkGNX,m dJ XR#D`ɶiH,` / ˜Nl8|Vc.sj*GRռR&A.` xٱw{.8^$)*$Yh-vdB,xDV?h [l'(5-J{\)u^Ȉd՛2FnzV]^W. bS=VE x$o *r͏ VoboYeX<1J#c`  ,G֒DX"\t] I6;W4j3;7;06 Or-? v"Uvr;t~(agʮ/FӐL`vpEm3&-OoYSNzCyG4W`Y̕ X#&*PNȆفٻi]f@A>%cBRn"]̚RŢbK"0*[2Y 1ejăI`%֡r[%׬8x@6hɳI}bLg'eWIѵH )s.Ob[b1W1Xs8vu.el̵ffhQa3G@oYP[,mN>afz_ckHN"ɵ#?ހbE&ܱ:NR_X>XY W(LPm,IJ,3!H&V1LsèlCf=P?UM\Ÿ\;CBd)nE<^yGӳx%̖:Z߼\05Z6]QF]7UNvC1GHT܃[V=ӥ[bX-{[&vL.KEϭ&naE_$uB,ċHxX3}춹ݮ?n'q; ؒ,*GX2sVو@%P "@D@$E*e!'MO#x U<Au3@~$$|j~qwa VWjeGUh ,TryBh|*0 <3ֈU8u=)"E`5㰒 %؅8fK^lnarn:CU"6úb 7[Us^*O:FΪgHJKf۞!y;%#[oLIB,k+Z8wMJ 4LL"X|ɱNs$ nAr#Rai]H"RZȫb[:W5ӫt0OTVi#N XFVEf_ھXg-rRi!>F}#"9F92^ EzfbH~).ŵU_9*\)B $5,ZSdvf8ΓgIhY1yGzDȩ6T,dKk ضz䨧6mNそpֱq(sr3U偃i)%TcO;F5v]a( q^b[IRjUJ-֩]86SG#U_Xg>Mҽ沤$C2Uj\_p@NS@7;nC MM]xt2Qj2sU\W%=88ث%J)_s$d'nj G6 ̲JR6X=͊x|p1X.D^Z c`BU\x~_iZ8~N*.xniK* Z4ۈIluj5\IϕTg%uVgI_@ Mv4w3e _ yA1"[b2y!8[4R6J.B|OB0 ɀ0u`ҪMm5T"V ”F)1}ɥ*zXrmGB>"[|-|s%)VD%gh)D\bך- X0u3!Sigg " jwN& r0yh:Ы$C? mwdIEgK:U=-_`֡"И1likHj{+YН#u$ ]DJG/K9Ʈť&@=`m\ז"fY x"'D`ێy͠:FL2l mK;pR6@OκrGLee%Bl[U7-[(W?<ʓ N?I' ULT[ZHU+\p텔ƩZ*]̕V6Up2|l}+ B :9= -7A3Cf4e[󘙒%69ͰYQ%Oޔyyx!>P 8r]VeJJ3'B˼Q6bIqvJ;ih!)yē`tòulǾ'TJ`'LZ^"IeavHq8XH[,)%?Sۗ) +[U|DVJĔFp1;Hcк, 2/~c'nP'[iU6\ "PR ”ykNCnwQYZ @pl-Buz&[>ʢnʪ%9@&% UG\nVY yȟ&qK;AMWv1Nd]nr1 >Kx/rҋw_'?q:wZ~;X-Z움8E,<'ݝ7 & Hb.յH@LX9{a_d HRYqq U/swǬeu!k]q:Fg򎚄}?|BtA_+'[ 9vyĶN,ܸt@@fjDX;$#)1 ,JjzS-ˊ r+2c.폊B#'%M cL$ 95{tP0Ԅkbi<%݋TkcQH¬9}[<o y5nϛm>}=ogZ Rbyhg&,g)nWd;ۜ\bڗ JjգBƒ+z;;]၃8KlU*/Ana@ia lzsP=ɲHN"IQeG^XjE6fax¶)%K0q'u@_X ضP."KlS;N.D"{]3=[qogmhŧ 1vD) U$5,#P! 
̌ˈbaZDXb >r"bR,DBV{:<Pb <=h#eKLw6HCPx.xaadR Oذ|%DCLKDV:Kv@i΁2N!,h e7z`5[jE( :hһGIیc,&-* }f( 13D& eGa@!BȂ}`ֿxP2:w=J8ㆱGSE-"-B]=,;b%jmX[Vw^ò wkQmhB/yE̮G' 2ƔvNw5y~*ٍ'X`bDH}MQO0; XRf 0ʢ޿k26xa׆{">ͳuyb\ )hY2cv6%7Ѳ;+O0x) p*5"VrXmdyKACD hM2E 8?ȕn];/P\,<2ncK@OЏS~٪z>s#W[2Yxv0١?1O7ͮ>P887~}*/B旋75v&x߿ɱ=/owgw/xIwѝTUcG"vY%Q>̕X^=}x;hn5UJ3A\jʒӛ7.o/=g?7>>࿮?ӿ3[[J6KjKQZ>_C~^?>_7?zw/w˿}='|[[ʲ8ZM6ܫfrY??}x;o1xOu|?^3~۷/ͻnݿ\So|q ?[xa_>i/o/^Ϲy!߹ڛ<۾mW|w|cu_z77ʶ?n?/>]+L_$#LHI5>v]?=wWC|m߳9w>x?~PV۫??{?}2v7w/n'5F\br 8[AWV5j3vaBOԶVCɎqn=+|?xTWMy㳫~E9?m3_8߾ɑKy>j2v1%vEšϧHvCqzGzѩ'>V޺ Km1#WZzPDo{{{9ڱa-|Wl/+!ϴZOH/IVLp>PҦ׫DmJWN\jaO_hvDf WG|lj-9σo5%cEKŕTlxO%$G\Lr1VWZYm{݋[Gg39&~xͻO>~ |̫?=NO~[] ُ./__5^]7_^p__~6j~1oxt—}Za$dv/Ehz  =_w_^1/ma_ylP?/Lٿ=~Lmg{zv4T|xJ8H(Y)wc҄cϗ6c #GPU} rc]2`H@=۲⃋x%2Թ>fgo^=5~m>?z\-_"]mm\>x㽅`%w#Dț]/߼7zw_|ݿ_3bۻח})bX` X`rl]Ο7/j?DS@I)z ~x㕷G 6wU:m"gH^RA>{iK'>}'t_xWц˘9*~U_*_xW18* 7p02!'דq0u8ËC]wt~Chdx=y4=o pE4!P?G9XpJS3T{|s!in` rxWOO%½!FD È_цFk?_ӀXsO,YAX`-6w<>e\4lb*|-K툳r}BO\-b^jÄ[5Uzru 5efc-]]C4#Uv#&н7{o[5ݿO"ffơIm [&1WqĽQ-lz3]ڋ;@:kɀEE>+Ee[``-:$@6GT{S﹀^Dm+f3="y !9&l.u'493q]cee.=EaMP>Ó"2~n2Ѐ}?9L !}w EHr tܯ36 -jq,lGip޶1 ɪpx'"c1u b. Hz7=<(Acǐ530 K`wׂiKr&#_sԔɐ7|>ڢዺʻͻo!&a]u ybI-btǠe5ooyR}:>e|Mu RSʼnu6jZe5jX5; l.LDڕtE!u5  4S2e\H8/zwv{|J 8s YV&5;NLQ2C,W/aɛL91&@S8n&z~4ԍyA-ߛɷkR>1tsZ,N(q(cgFp_o(v闓M3*9na8`f,d" eH{{ɮINnEI2h$#5,tehz'a01#v򘵝% w٨.pq|a6E`)b,xQE=1+Y݂F9:ka\xR"X=WA;N귙&0%9f! T% 1 NDpiֺ̫cJ50t{"AIha| 7C>OWi! |'~==5OiOrfƲ!ڌm3@ݦHVQj`&ch>G1N0[)"(g.XڙdR.݋g !,H0ܑ][ 1 v?YS([;[YH#֜fπ "-eA[B& ÞE>B3&\oKۊ)R<L!#$W,w*4q~Ax$$z9b.L|6gd٘&QG^s4_1CR889y ;@ם\ChI!]Z`\yxŐ0;X_o⁰!Na9! LnJ^mWX20>Գk|ˑ6V%AwpRgq⼕r+iy³U7V?0 wbӰm,o϶ZWVmjJ%Ed0bunp.ŞZѻ6Ǯa5tDu)-{-GgL,z_;b[Xj)R b> ?j4 ɢr2iGc/x IkPg`?c8DAj65|UX#r4 :[%fCeq7E%a4u@^poR)gt޴΄컩9dBC0½Sۺ(e!#h1մ^\Vڢ,% .ӴÚGag{_ Yxb_7O%'9Cx3L]'+oW`!X-Nu+ ~9!>su aKQ3O|9sx:&pn;wȾ$:.ZzZ] dXn alteY^r,C g-à=XbVTDx)i'%VlGt5 0V!K93>8] 1ᙣ`q򱺔,~2@9>)g)86w`M>#1Pla^|Rp9e%R2M{ l95),JT)gMot<~t .bwasE\ͯlJPJ}8ObuQ$+JHJj C 8!e|C͆ ut;#axtA&ɚeeܿxoKₑ%ETǢiчZ}L X5UE )Ԩ5 v̶%=˳`! 
xd%/l||`b5kLJYAM}S)F`p>!':_| uۖ`2Wm#5U 'ޑ>cj|7ZL^5Gv  \&ʛR qV6 ?4? ߰ UBAoCR9$CeWDħ?dGD|(&9 `]'!ǫkLVZ/ *c|WԺ|(pPK6D쫺,g"[2dcWgqK#S4,Q:`8ƹ6!ߒ`IJ;Fnԧ? ٮq%o,\={= Ź6!XWQ7(K7l> bCޜkL5do8rcl$aXY/ }4EsF-&, >3[ 8m4&\ 4ؒIVy;NX2*a<(tE-%*Ý@0O} ${Ҽ gL$LiN$q`P"X7?x^yvR0txpA[G"ò wk:hALfvV 0KemM<|ŝ} 0<@;b^9u )Tr2v=HѠ;zESNĢc4`~YǜZpKv!*[{`ޱF1{%ȶR^ɴQppX>祒;B3Rʶ0xlG:_OI_f~ոѻ -TSXY[Jht?):gI8Lutg|a351.VmY>lpPqbsugs751FZqè\eo"b i͖1{$RMصyۇaEjշk˹ Lr%ss#G(ިg g)qj֫\'3q5вؚ0m&.fGO|K5N[)7%GxS f5jSE׾5ȜzgӈEnrzj8aI4\7(EO,sAܗƤ. @fG! ,ɢIGooK|^>d XG\tdowm ckf/? , b`W)W?аsf8EG|>ڤmtL);\|VG"YjhS_=)w> I & n +[Y%hǐ<ǧo9nU];;TP;!NKϕk2¦2X?v_v0 VbBޱIzKV`ȁNc*CBò%NҼ1{-!Gmu^~+b#$즖ҍ/#cB w*wނɥ_Mi1K ! =y^jl+LE\=Zw'rrseڭu D`*օ ~BW O JyYpIZCZýisN늃kZXVfhs>ZUĐnC! QRPϛCcReglwv%C5;b /IgCX^o9Z1/\(&MO$I:0쾜 mʭY> %?d;ҴL/p²f+=Je`}t2B~)2IV׊e1Q)aH19z#Qb=c^V'fevh۹"N*>h0G^BzX"|s:V]2`gV3*[1uਂ޺̞.ŲQ&TLVlvV^kՓƎ|[~qFBcɱ FK5'K?3KN(ŴLi"1y^XT9l9PǼ1w[ݔ,6{SͶ,  C O`G{Dy ;[0ϕ?$i\(BJ>2f:-D?*q}.Ms gu/{V$ ƙ*1 شR[ c^òN]v8eu ʤc0K"yɒ{I|}yAbWW-#K75- 3VT.ܗ&ɁUCvi l ƎAgvMU~>ycm7YR8d="1 YȤ-ޭqGaZf _M/lS!`M6S/5߻RZڻ2ZM%oZ0j+5&Bb#c0aVf;N;SqWGtˮV[ 8LNՆ,^$!exxc.)1ʌ vmMqyެq]Ld5:`3.VZTמLXEM5mYȀ Bn1p ƲV@-7Rx#fI1p`c,v`r&7)67Gco@67 5)7IrlDqS,z_&c+O;>, :s2!PJkGmQҾf "cFp^ܢSNR{2%W ;p+aNd£c-1L;M$ rixkY3x[ 0/ X[%6ǖpWOYʜ&+l E48?9TR1rV=M"wfǯk}{dMͯRlkv5F''c ?6_& M`Z,l<*X3uޤ#!['u} f.4rLFY= X'F̌YvWf4/ Y"QE@NQ}-AoTt׽m>o Q7=Ϊ7(zUVm%r(NȞ&'>F)xDx^Ko{#/F>dW#~MUl}"6wZ_͞sIJg+r@6';i[wM¤W ᤟UV(x}Vf؏&%L|OGkK6x<;l1 zmwl#oEJjl8YW)WP,(.td XSƖSvk`  b2jF:`Y?o@~Jv 0a8N+SZk= U{5Y3kcT-.&G2dM-Ěd)6rکvMV; Kq:c=IeI[-S`gPa'hS\ZM@taaAyR>=bnYZWI˭[MںW/FgSfƵpI653d $˱a[؈į{m͗k*,@%g}bBu5څX!$Y¬V`*,;ډ-l"wO&HGa0A7eh1O=zTdZuux1I(K>& 6K!{tKa*Tn鼂 $Ê`E`ՊE:5UiK:Mʔ\+)6٫(e l7LS([9Ip!K!`5ǚ 0 SD6DwPqI#Y7EnЁ?*E @tOPa,fBLU:lS1JmNe1SQCF߸Bj,`lᎼ -Fs%kʹxY n=$7q3N9Lua(q7CJD;6f2U&u!R9YҩgQ>Rjl 6ɑGa*ܦ c`\`65?eg--W0w?`-Oup?r[N%:XRHo^ owu^D)aglG{Զմ &T;j`.dIkm,N*n3V.]ZǕmJ\q ˃hޘr1<Ú2Lr7pְk-{SPPP\|XJ0®c78RF[wdߚ KlkY)ء"'igsQ-DVn ّz"$wrK$.oշ,ijW]}>Ey6g?k))2Tƚ-ac.Lá%+?0vA-J 7༐'T)X^-it7c r0TVNR_)*-iRxTݤ 3x Uwg949PT%+>嬲*LP~6%YyS[k}(g珝 
k()1="LYYl/ 89pYEBcuϱs$=+X~eƒ5SB4#dF<(E˕~ Kѯ˷(yhiى-r'DU-8PTl$d}oa5`{l? SS3nKT3O)S1MEC]3yI]MV#3eÆ6u`hh]J\bplk&SF%f.`H`o5$.#1n3ʣnp{m)l*̪ u ]uKi?e_z=6{mIw{[&l5 e%~Wcu~krn.<[KE8Kc^j0ĸƾٝ/_PKz;ɲ_lz~y˄D@V[569dx^5RهDtQQ)OJd )h -o],s |e5حG$3oP";E4 n2{p w .Sp%mgɆ9DLe`&S <;ÅꄗH#zNM)jY rVB}T,YmqN{T̕Mj!s}f :^h aDw)F#Tqg02P@AR$C ȏ=΄n̑)ĒH-̧x|BInZOg^J@SbUJЖxH5h(ۍ$XlK.1 ANBN[\ZCBcQ6:Of rxR~-gG'\"ǃ}_`G^m#j%YJ vV[P@ x_/"S;$<}!o,jבI-Y$|7/X2{5rnS{*&$ZR5]_W u:Fj3= rٚB1-B] ,{L0R`YKL 4WT!¾gwe0M -ھXje[h z]{ l D8Ί)՚u 뒗)PG+6_r"J #uSGu,<mz.X7ZHՔX)~s!*)s:tu VY€/vRxIl2_@ynpOۦZԼ/~}tUG6c#T 4(*Y`I@,]^Ⱦ;N)`yd\vWa9 !!! 0D k_4}H{J& <O'5XȭZl VQ2|l(oB#KMoޞٷa ;ÑDZK|i\:F+ |S",;hGVCJx'Ιa?gn3(GZsri!(bh-]T;,?Vj Ȅ)3΅H< ?o 9AQ*]*aR4 ,9OcJ\X2Fz(&qLaJp>^_Ȥ;\fa?6rI].Xe9XBO >ao`c[@Y0?өuQjXf Ryu0AE"?T㮇e}L3[o-ݿT$mwU>AoDdxů>uuz Q(c[pJyX6JT*YRbNmKk0Wӏ##Q2&8a 6ǮYp0{vJZ`BfܦEDV+9# L̵K4mDLÞ%}~NB[d%-5w7:簗 Toz#4{l59>_itE) VJ¢FxX27-L*xlxPr>⊽܁[7etY+KXLJZ[kT7]DpXck'ˢn~ˇ"@gR.j'ԕ k2|}Rri2Yl1a6Xmsݦe}J1*zW6E[ЅqJ[ނk5C-%;L׸`ʎitSӚ jOBQdh:#C܄wYIE+5Pg⢴7T~ZcHG'= I& X%؃$Kd-rG]p`WnQ75GDr#B %}O}7^S$tQfغD۽wqlL'I6a1 @nѕP,֕%X#!_=}]*.0(1c0;?̵A4r1&DK1Cڛ<>&Cؗ$-"8mIq }NNJT`e1Kޔ5:DLJ^Qԭ ؝YkS`*庲]ؤሱe`Z0NՖ5ዦrNEbdI4'( D*-1cj?dJ;g4vU,U$j? hA~~[%౹)W[7j{`* RRJβEvSkn8V>N>v]C 80>j3{gX@{h@%]"e1 V 6 :Rbڨ_)BWe %Rah$0?zP{  1cO&TLk0#/}AB|e_˴b [Gg~MK X(ԶLbʺIQ<6[F:P{Т]ۗ f"pT%h1$9 bQ6͔)'GL噀 (<#  ֙Wh.U;PImv9.S: N.tv䈘g6 $h+{$ 0aެq#sjQ|#r? z L.C+jT RzP3JU٨~z.l߿'՛7x? 
Mzì;|k_CL؃T6XXpMŽ^\3k;dV0.bXaF;6wޙn$ ̯T,ڹtfPK' cBKj/pages/780E7787-ADA9-40AB-8BA9-1C21329E7C45.json}s\7rg 4{ڼ>JcVTI:)9wDR"k6mr0@O=_^}7^_pgϯwNxQU:bfrW{|Kě~yu__|ճo]w/>ǟ9׼t?)bj̽'Mg_ًW߿]|ww7xx~/^xs&7kxr㦆LMѧ1YQ\x/o9g}ǫ?o\w_mg x?\ۋr'/_~}߼W._ٿ]S'OWo/޾%ݛvo{} __)k_ !߸o럿߮Ӽ `)ܾ~3wH{su|#x3w?pkW>ޛ^BOl2w5RٔTf3|77Sw|pogͻ~/^='Na~|_OO?"RL}w}{_Sqə~}6/O|X~xsu7j|0B/oV7+GWWѾ|GKyŽS(6M63x.g*$;as1I1<Jį!GW;syO }{W?a\P?/,ܷ_(ׯ?Z{A ZB x"r 9w.W&nyQqwo!A팒XL5 ˝}%G!%K q`cLuٻev<_]x{[}*`^-7n^Ë4ow?ii 9-U  L`DmX3nɫ Z_6K9٧ilyM>5@DjisIWC|5z3q_@ԯ~P?mW?^Oo|$/W}{VlзO= 8a  hcY|N~Gwة 1HL)J( -ުB=eÃ|;߿<:z6S~h~CfzPA*v<'?!wo~b-N3'Ĝ"o>(|?'F(PE`oR JFƒ>~<Qvv+aὃCwl:;Hbb1e/>OT-Ρ1aO\,@*8b}Lcv88Vة EXCq-)HSL$/6V4SiCMR 7p =0hJkZOEbzj`ۚBPUFL/R%Tw.+[BLإ`= |EgoҴJXz=g$ wʇDaF>%ha烋>ql8ݫYzk6ٴ<-&Nam\r\,McɦMLJS[TEZ==ra2\#v&F:M7bUͦ4M|WJ 4?lZH B? ʸ5`k<-e`Q|_ >저or:Ŧ=m".A-F@wjG XPr~{`u(X _Bء'~(*:k$H>>"g{(!h sRllXa C)U6.0um²DChl1{"fD4=+r#~;0`ti~PywYc 8KdFa`uЁ\uNۏo@їtN {xVߚUXSsƵZ;$|J}==lp]LFpt _3x.]ZX_{TST Kr{ >ﳟDWX[0>du׏պ%g0oBnNm&/y|ISt-9 gyFgmpJEh^ ̈́\0&4hբPReH |IX{ K>P~?$2XU Dz_ N8 eF v?Dhۢpڿ%ո;򎰥X ة/IG/MH4 0-'V~(_8lx?,+fJ0"$ c[`&ceL4-s2|Ɗp9T#+zEzd|,XUp E)6V byKP S<iRHkJkd*oXLIpJGlT'ݦ ha{@OdrbKi>ym(q1R2N txv}0SG/YfjM藹cF0zV}+f,s02bJŷs-Yԓ'WSa,,HӚS\\ٰ>xZ_,Mme)p&n-A/ `d-p I-g(5E^5Re;q`x $ 3C[Yz}f-%cmPʫu8 ܆nhX |v ^9zJG%-޲ìGE qjc /38 #a"1BGmF2bd'iZ{5%CB{i^lBeȚ`؆w ?A9:a!{T0!$!N:Q°sv_(5qtw&x@ҶNJ:;Q<: V!TL8ӔBX.(6qz~! I֮IX>(g c,_]8#>Qi>MMpפ=]߻ge]{Ϝ! 
.y Nkd n=pk0.0:e[򁡎䬆 rVÏBW#zx)I\ektt6)@6]K{?R[yr^G쨄΂y3 8I8f d:/lvr{=8㽯%{޷og`g9%Gƹ7$2AՐ$$sw!%dY6 9ٰ])[/>\5H ˵L&8%I5O%H j/ |FHBF2Iy]}V(YQe[S^.0n¡<%`+rBfo&@+T5ؐO]ԁ*K FI*QPXZ']<&12w M,2%[]q:kMJ wD_ز|ˬMXR Eyo LְɄy96qC]8:x 1t V2ZRV1U)Mt'LA,\(?tb-6i l;KemfX#S[W% <-%;8w`o` Qe*.~W#- ]b>,'Lߋ|&HG0rݧ$,VZc1c^Lo-DK:D%`m4B63Xįߙ7w+"0]YkV2m2mn_ A&'jabua $O*٥"Ui0 Pbw}l׭n62mYİGCzHcEg<_Ә/EXY녫$~I!!Q`#FMP2qԔBJ N)BԦjIJΉ|NRvۨsbjݴ)O L2 +z`.P22+bj Fii`Zd٥+ᡃ\ɬ,TOHܷi$f!ȮeX@ ¢Ti+ džX# G0ֻƀ|Xo-J!M`lz!zYG"TG(Cj&7)&O0d v"xOl$c5'@FaBqDps)ud{v"g-uPL3 DPs}卩:/LReHֹE`<{1RyŰ1WIr{3RH΋(,sP\57X3(UKڳ܀O8O^OE&=(۪A!`/xF%b Ζu 53,g5QƎk89{\8=&,uiYyaW+&sM6r8}S z:օ V.M f{7_Sf7 Yx\~|rE^ˆ֜bKS[O !r v2`AU+UBYT8#JIcL.8#{wI UE`u}GА?4̙w!އ\c%Q(@e/~1T HpvD݆픚E5XƳَ*3=:(}E mdbɁ W+IQ09%'e=+Uk|bCq{$G"LhʹYT\)'܂˞u=0Я ֬Ć6@k:bҪGݢ:XHHirƃ"d'-*QVue 891mlED ίj)<luVjm#ڒL2EGx.?vjs0`l^ \fHb*R.L.-!\P)~o[S637(yWCyQL9] bdur;_Nu໹`LS)ߎ9V~xaDg&*4uZ7섺HOCt+v7nV\%@4GXgrZqze& .ְɭ3EOLw!\W9嶗Cq-a`R~8:3<2lJ [ 8`2>)=] >$' C3lrHx#{^~l& UCTW=,oȝrr?R WdƇN5h\RoƳ>LȂ5eFdzR{~-xםYz]zF`OA=scȼk4LdK {1DF|%{ t }Ģ5}G 5Y, -Ӕx r~8Txp=U|炨/~?+y2c-$%A>R"95zI09e} !6+Fw~*3Z9&RQIl2:w_VE/LI=+P7@`d5 vYۧ7@ A3 )b%8fge*BQSҦ(8t}ۗlayp#X(dFƚ8kW F1 ٖryGQFsl v?4WK,ɵ jlm5en⥞q5J$<&Z6M5~qgkr0 %ZXfs7s%Wk79(}Q2 76b%k() pڐe%`9=,leiy;6rmCsGgP2K=bb`,}1 eŃ~>.iˬ]kisչu8Yi2}Z]iKHfϾJm4y80yve0"16=w&Os&A'3?eEnՏ^q1 |ξ")0&R)q[Ҙcg\8b[vi9ɴEzxVf%t2߮T$ -e]`LwɤT183X@fvjY)Q~Nc?(Ƅr1՜%yGZmx$(rGvl\?_BwQHgs>TV5~ރMc ,k\ Ë: G3#ywE#pᲷձy`ZcgnK)DŽ ɯ5XhC%L)fW>`::J\,Z@h69h4/,[/,0!akպ2|r/H"8l$nFL-ϊd9u !K̈́19`h":wLV~QA kS7r~]X$;.+eSf[A* o%á5 aTicSLn 7s!Z}:L+Ux37?bd´][00栗jbOr0mYz tXau*&; };eVh~& [R,bam}8)k{q X)E}(؃Bv7cDbd0=cՂح]O89с8:2:QzG-igv)V2u\cilbg e"ڇ,`dcJi@;Yrvo ID/eW`^R,Ǧ?.@iSTʲ@*l?ߠve*B';0 &dlYXz Ȭ\^*̎ Nq>)a"\|HȘi( Mi[3fe,(]o @e_kPբA)끷"e<3>Tڣ nTT]pc\ò,ِ.a3!PY>n2JBfEQ9nF2ՒiI)xNvןWXپ*Q" r[3!PЊqW->Mgt @#]YשwՒ|yGKH$BP=tlb22ݭ%J- lVXJɇS^%UkZ6ێ2Ky0 t;'ͦWcjYN?ӕLg#B7hyvv;ܫ08Դ+#@F2WkO|#IIZ[.=RsGaHLlg-۷b})y` / {bvT+>ϧ/ ,Mzg\h3U@y04v.erv47ES[ (\%)VEŁ, khRx#f,<?S ;u[쪌;{}o@H*o( 5%,, xE9x73d|ݎЈ7e 
QiT&8jEIv4vKôi){Fn=f}HNe$դ\˺< G2p+D֢;60 :}[3)2NZ:dYDŽeS,y$]JP9RN:U شrrQ_!pE|h!8?9獩pS:D_VݸvR5#Fڗ% ~G* , :ei6HK&,fA)dfQ<:1nO 'oF#?fipI7>? l5GݗECQw?$uK!55gfMe_-2*cIQI7|JR+S-as3ܺ#lE {Ǭb/Aeqx%hqZle]D\3 rNLN'okYY NJ7Uc7FwlD 6ykXvit ecLg$/Q2Vn\k09(ׁ.ℵ(Jibkĩ@A3o<2LgT!Q%֙嶑o6Y3e9whZK4R h e@WKl)M'Fk\4XX7ɤM)#`U| fXXy͡J ,\X{'zzR8ݖhĒ MT8C$SquQ`R֮]Z yR5Xi%+_XmZHe#]Sm(d@Σlq :X2+ /F`_[0DdF #4<E{䆒M@y,yX[c3dRQg;1O1UH +9ɺ(*;0͍l1&5w350P"O"~Q`\aF5^G\ca<-W7p3N9,xөv)PHm/!#c!L]6\9*,'2Yh5%ag(ln0jȡs Hb][r[3k&˲(鶾^wKSxsM\\9um,3S#5}V[͹JHNFk_u nD,8*nqzpdqMp}bSVʮaBATo W9}-/t%Ʊa &i qrx*z*z*H9`7JcY+CLL'a.KNWK <.LjZL%}ddz=_C ||X8-f4yoײm dŧ`Y[XBV4l4>-TE'[v;?CdnY ـ3_Dݱ-ptU_8L ]ٜ%%Μj][P@]X],V~`c[d#ln >ݟ4k: Bu lv0dV GzA֠)ě L$u^6$=+x椅B-14ٳco!JfH 9娲*:LwWG^YyS[`֠kmH*אVSv}5x>"<w\:jXݳ/,bMዕ+# vc_($mfTO5n`} 8!JJK,7-/ Do86I&?/A-%(Y7@ Vx6l? e894vYܭi3$Ųe~)q0띛) ejTrКS&̎A08Z4ACu@"s|J;) KvZ?Wu?}ɡ/j4b6uJ=OS+Vf[&<K7p so:eY2°XQ̵.TlȔ9,4hb rTс6V{Ґ4`l!ɮ-g`&[`ۡʬE0\mಅ8i*JBy\;z&j5!G.)1!ďˎm&N"+ qa[[#fƫݎƒͳ 9f;i)|L:"8¹-eiors^!.C<)H S13y3($쫿c%X j~bk[j=1nF;42an82g8T ǨJ3`>c0aZ XEGXmA +=FH,c_#ÃAf_DٜDlK5Fz=yu 8wH e.j(rtK*^,N`#=v+/Y&`~êS-pM!$7rս׉<|QV&. Ԫ5ZUR]&O}hĻ.:#SꌭS%;v3xQ'-iUccOڗj98&"a./0"H`] }'FDYM9ιO-rwg9S\P;y v}rwf?A Pu'swy:Y?j5xTӸ#52ذx"L]V8^+en#\i;jlzK6Օ`x%%< vo@Z%K,<jS )ĔJ\ޒ6?MwcVMkJ5<z&bf3lޞ׺2r 1ы>Zv;o:_dPƆXW7'@D !*OtE_Iv$]´z@eoVYPDț)^Õ$em *+c3AG`qa"$Ƃ9m۾$g(2{]03bcq\uLd)yټ(@d^Rꉤ@rdU!KsY78B^CUN8 #әQ 1IL6 ^VYF) @ha,Apٕk 繐^He&v/Ja/hJ0K'V^|˩$)ڛ> e܁'d&m2MfKz4@%7E';\;aq =So|[;bo"=h8&DY{҅g `[ ʏ8/<ªJMù9uSS\݄o}¾T [MX Nj3z߰I4͹RaTA՚gc lk{5`ܼ.dPw+ ׃IzkeOkAdyY6! eP]$9d7[)# f~6$^%{U m;<b<TrkqZ;%ds s͚<ƟIө0/elP & pn$% -! g~*#P]G01ٖ=LTjR@l WB,>JHlV^1#‘ 2+v'l0ÇI{I(Hok#3Z)̌[)ȍ3rZHæE:LӎIS4l! 
.C0-pN:1K 2rl3'7@~lؗYk8 =nl`).E%d$/aeFA3լf8BMk6 *jS >LO~.} ,S+p|ebT3s-YJqC >yRr{Pq1>u&QɌb;l|_N)sr4)O]\YxR+R%>yiolQK:dmVV5OwKS17٤E(C4/c8;oU$?hĵB4+6{~LJr]s3l\j8UHܞpN[F-Sie1]^i_Ӕ0HufOsP(f̭ΦeCت*哪rN$]K>{:|4fO׉ 6wԏ.M>vG46QN,U%q敔A’x~i/"#VS!Jx ׭l72 4T<>D#sXQ M(~ƩG Axf@;@ELuPb:]aӣcV^nD 4h6![)`ޔ-ፚq=H dpa֛ZWm]u/pluj LdFyCbZ)r89,+:O릵M78-QJhz#U)`rzfOy>ې4dm6c Ӏ%e:t2n=5[81.{8؏/xֻKҗXGx62saua<螰6QЌXrtlYF~oClg }Dn,gnb"%u\P<¦Y A؏7x[Zg)Pc$WyunNeY2ڪs$RcT f:;t$9gOzYEk5]ev@s85mW[3xL7}3]1y^z$f1$W1Ej4ڢk9z.~zjx)8/귯濭{g+X//_vIz nԇ^ΰRsm0sPY^.dܒ[iO< |e.f>a '>Wg[lJx,aX Q8XvRSPKLZ/pages/F0CBDCFA-B82F-4FD4-93DE-46AAFB2E994A.jsonMo@Jg["P l"%(Ğzٵv;yc?k9 ra (^&q44OU8-rϧ Ns<-Q T럫F`0r35?~tjrgk٩9DpZ: K ,pg8[ bMcRi(‹h2b쑤/OfKrkvdžsDQUj,A!fd%OŽ7m]r~u@D %ke,󺭯YtN:os]aک.-B NΟpk l7KD ؃s$-ouC}bEY-b04{&8c $^uL-Wʀh<_PK4ðkHA/pages/EDC6739F-F030-4250-93C1-8E3B80936ADF.json}ksǑ_Q Q,XgÆc"a k `m̈́Sxl"aM}2de?^ջN^}N^nqH96X#>ZSNW[Bٿ;yo췯N{ nߜtӳo~swǿӷ+;>x7xӫ퇓O:}8ǧg_خR?ON~/>0䯼'oO߽;y}~?|>ysyx>Yw>N{w|/N-|_~-?ޟ|8ӳgp˫?xڲO<^^_\dz7z'/G=o/[篧gop'g^a<0=}?~4\cjØԋ%Fn?L1R,⯌Wr5ެz{οyV_{wr̻G쥳^R~]G7ﹽ2,&JY%8vYՇ8r0a!Ox7'|nx8ۇߝ@糽?{?HPkƵs&9XDu6:8_]}~x]^=r]Rr6uqːWhr?|߿}AwFyI$F3CZcygվ>.ؐ55 [F#> W8;?p6yKxr ,#"|[NṯŜe~21 v ]pX>#sM5%e,ˊ9^ˑ+ح)DEI1ew݆]k.;QC'n-r{jOa9c?/?opF_/'?ogdyx&lwT4  Nޱ~_ߜ\][.=wu#\~T_pK~f=H1h3=4Z j=<ý"QR222ww%B"cDw-ҶKω">|UpZӖ > Ϛģh{xՓc _0,+ɪwŵRz3-[_Pkz ) b`1k`4agN,]m.1KX=#[|;o6X9kZ ko58 7Bp )!le,yH.%$_>ERx [^z) ; x?`E ɸy#vbIRAsyayɉIP%Xf21c;XY`t=4d%8e!直|_~aR Qe->JVrQ$d $6lN.HF-ܔ lHDKZ>$n_xUb[' f#*zW'l?O>~pp!zn/=ttOv8PLQ 3 Hu:۽θg'_{NDk, +XA.u6 GYhU9[aYra B%r`I) ց )ÌKƬDٵ# Wd^@MU< Mv|t(,'&&4R~H KS1..~ĩv4Aj䛞5tll5kEN2n7Y/ ӁeZ3!lftM_7[:Л"ˇ^|3|lT7?*O<-+m̼6,V)~*bu-E %D6ϕʳXk$ksٔ!'X<"hz}I2pPA,UU`&l w a4=}q27I=J#npxT6FZZM}sR̿_f໳o-[2;CxZ3=oϙ]ϸN8J}O}!;>{}ùu.I: d}1ʜm׿쇓vOt x:9fhDa22LF3}7WuzYhD-|~v #'01 sn).x#sWL?LyhttZHN܂hr3#b!r]d9 H dYu$@!\K{P{v=ZRϰ@|=mI聴aD9md/nrf#逞 C'sč/m>U ~%O\H~fOL@ R(b ;KmdM']FI)1:lzw $1xI?twߵXoH}JLh$ Nf;S䌖] M?vA2~-FKFJcӄ{lz?ed'bx=EX<( ][2Z{= 
ZN׮:W`d|`'f4m<4x40]GpS8tuO_ؤl3xT6xƚ$e#lQ$0<+L#V,4IFЅ7=$<_Y>!L[|@9qG)P¢ʡtJ'@xɉ6#Qv@Z,snX9j$tl%$ciqꇓlNhpY]T*w $Uɠ)[B=Awnip'R5Q$=V,G[㐫c{IQt?Jl̶`9Z'xq(Kg=h*mѦȿe.$ƾn-/JEr. ńVA$hE J%Ȑn_" ,/$j1Աz<qHUkJ'skV\m]YI݄tHmslr1S)a[+C#AH8ڨ V:V w5HFsX ʋ?& B;X廰1shѷ!Kh!<&[ ;TqSa^2>*E\+5pV& nŨk7*| y^~挼h4d˺c:˾)JeW1Q .$ ]0!*R3צ(}dc(.ξTUlɋ? rnɲ*sFs-koT<H`\kDDDJ2'R+F&K%ug U~j[G?C΢`|K%ŒP+Xs)Pspj@ ȱ68Y0*ܴt &Q*kRr\Hzqz[e* ظ=._IHYnȇkX+}Rmdшtvƥa8֦`W ִf9gO=e~ID 8 \xhy-+//or6AUȽ2EZtמdR(~aIrk%2P HF(oLVJ& Rl4iLƁM9*Baf@a`KLG6$y+|a.K25Y_X;5F6J % \)tĀC'Ï環?D 5xO#ORYY!AG*kߌw24CY[ZQr> )iEk",c(KȺ0!sx4Ա1ś.YHH!uz–,`,*xFY8Dx gS3d^'DX Q:vVK&?E^[C˂\0Nhno궁̕)Va`eR*DZ葕uV0q"aM9Z$ddum .do_N|`X>cI$vSjeLjf?_p۞?+Kv r.` _d09F;$HM1 H̓;X!AF]Q@uHZlͣ FTi6#b0hv/@X\P3xFF9 U@ PWxO#LΩ>K9E zLĖ*;}j XGF6a:G%1iJV bKiW2R<%a90[z%inM=jgI%Tn vXbuLy39^Zw9*BJ=c`UÝ,9@%,H 8I݆5NaU %HU3#l!Br@EtgKPrJ;f/-( Y*խuR缱$Zb5 E].A>H^@V3Oh9W%}_ת-Q6VO9\WqmXsz.䜻vU"c+99$y䓂Ց.Qɋ!)1llȫ[q)\:dF-IzM ~4] }jbř4O`Φc0+_k ?~55q½Ek4Pqat}RWWC9׳O?qB-0Մ撽]zqLJ) OMT#jy$~rLWB0:XdHr($E}2gEɳt+t* #btv0O19!~ [{yX}\J/1|9*2==1uQ=cxs K9Ʊ@Ɛz9&­:-gS'dWR9 *4.r(orLe"J!Yy! bqR,j+K)VRdⱁr+XlF$5:^v*QR3[z%&5 :XHIQC)J wr-c-Jؼ0_ Uw'Mzs  _V 1!Flj_K9 cymÐo+k< mDX1H쎯hxuN(dW(>ae1m4rf>"g\JhasHEk*tV/lgҥjMdIu{ 'ApQX\z_!B>nNi=,'FB,x _#6ƅb Y3:=lg%[+9$(4Uۓ*!l!۽gXʬEҽ,]Mdw& ^)e0hs"~a}-֭0j ϑדG͝ZxM.D $Ȝl©0&+r]Zx/} A(FVJx /]U$xˤeZ fbB1Y9 Zɍ$D6ipQavVsu!"̸d~rQ, T( P`ʾTxҦ|q<"mÆp>*Te,#WrvrUG Іd[G^X穕짿5t;{{~| oNn➓=kf_Oߟ]g9< H~2` 4~8 nO') s\n5 fL7lj l޸~"{qW^axX'oNOaPum,;@ .%J?d[r眺2[}~>Ӎ_dju@0yuNܕѦ)>Um5nB #,YX2ިF(ZL,˥j䚏\( rgP32gmtv\)ٲ[PBvN(oq#߳ yW B bߘp+鸒%5'E&N(K)^s(X#G#B6Jai5U}jxHإ'KHSiHxV9,,O~jܥPt 4|dŰMzQd~Qem5i3? ;K(XgE.8 V ;·5hgBZ&(ĺ )32mVy"0S6*ź*v dbBY+A-,,PINY$/73Nml#BMVd9~BedTppW+a ks-Vs ~2zTZ?4Vp[ިjf) &X)|:E,:бo QkGHZ Bbn auE|MKS 8ƥ2wl0^oMI?aYr`¿ Z%LR9Ǜ51nbkV#_8 KMh*c )'zd+*TT$-adZzUz;p[0;?&`+t?J#R^j^R&BD)Z`;ZZ $̂:a3-KqNRi8rIsg 9KQyR8V@p.f#/l-WIC#9Xf(rHjs294#%'%F\AYn*qrEc[F@YR@c=Q-K hmńn;VH΀lkY[;I.SɁPccldzr 8xC*ri,k.8v$lH_k. ,XJHVBd!Ar1--TU&-_ Z X\QQO?Uͣ9̓IJ ʵAɘ7tu0qpGxID6$qZBu>C#=_BifeQ. 
SL*~6X]/ȫ{yi}WGZ F" :juH8#Sp9"~i c _ U*(6O9٦9d̚l- :Ri #j ">i8GfTdCY!pe'J %U m/sρU8M8uS/Pk#R~c90u UG6 jDR5*9ǚPi'rU]0^T>𰎝vT p̷M B%;VPֱa锸<Ӭh|rH)_tzB FNd9ԣ"{:R]ldO.^2U  19Z wLIHRit.53HgYcQEܲ_/o|L%ߙtaJ;*IcھṢ] XȎɞT#=Rm^jxy?wu Ypj7ƳõK ΣWQb50#D!Ddn)C6;1dgZ 2qB.Uh6hsI95TRy&O DBKʡ(AH/9}\̎/ ?#箘{weK;yL<o4mmAg G4.釉vL-q UL<ȺW2 }t6}H3Բaa\X28ۥ"sDI#g dUZ"-r/S%6lȯzk y(ٿ# y<"[5Rn'!{?mK̬ \P)X.m dc(S@|Xd3wcBhCz?J:Ыo_G}=T$N:ƕcv1dOf־c;bǑUFǓvE`ڎ<W AƼ&dn ծԐFJ,ʐxAp&F N&[ĭI}Y,LN3G~HCP['9e H6{ZZB,(z b\H>ۄziT*uzGd^Z឴ny b'\<"VMUXKZfg1{$ؤXUh[\˦Ƥ&;c֩!_H `w ;|cN"TC!U8"lf@"M@vv"MFR"q(dvE1-5X[FHvM\~Вȸ^Ry"28sZK gy޵9XJEsZ07-Ml\t{j*<>A圾O[.hx`$ۚR1ϮErX, '[ (v$;η 8 {Ph ">ImHGqVȐϤ'/l˒OFl t:,;(>RjBBk$]qm}jN'YQ$g ecd!BR<6 "BWAMR?8@?#ő>2|bS#W$Z/sU+ eM$Q$OU #oZC:Fl;('U֒/~y Բklw=J?Dk6 vBcdIn#qQNL 0CEWO:M7jWcO` /-/*Φqza*d'ʖLfqƩ؍]( K@퐿*\`Sʤ~Y%6F5MJ<8MrrS4 a(㱡ȬEjE3XvI[Fgňc Y#]Ld}\^^1"I52'BTH$NcmS9_Ɔ\xbnZC5CMIjOro<ٮDrıGmʁˋzFCb{OXe>~lIU!q>QPUrfYNPr#FHY_fUD8y *kw9[Q cnE(zSUq |]'s*o5P& .D BDT!":`k[eU%W V e "X㸋l;Ϥ+$ {@>O ۦ[&*I[pߐb+hq; xd c Ty`oV^=Z̈.NbVzZN GtE^*ʳ";wWjU.<ӄ}f;̶},oby6VL*RBD&]J*|-Nw q[TqA}ɵ?ylWX~W+b_&>-9˶Z'.-K(;*ʑD|=ÃёIkCNo^`?n]zno $ %y+O_ ҋJ8 ?K=wG0yi-V !'#c{ѡOM]1X{̦t1Jw:7M=*\,xj!`,QIZktpàb4&Q;cC8b|n9c(ٓ; 9vxyz{֟Y }Y*;. dfv]"SO48cݗaӫ#Σ7h <JNp*/%v:ZZ[L?i>ư1IDQ-@w{<ugXw&#eEN#R;LM=LVW /groIoW84?pH Œ/S˔ ln Mn&~[2IYX߱L[dž08JgYO K7*RY,*?9H|w6gokd~{ߞ ]='{>ֲ,;Wᗟ?'JvǩLػUGN{= Vg7xk֤=XgaP#lMśiNl(^VO_Džnr`Mol:d q eE)*þ ;-39ʲj1rkNg_ˆd` P`ԔA{e'?Z(=u07I3i8E1"wv_ˑvة2F=8ipp#tvrdHs*3 us-ܩfk.r!H5e+[psNV}VX$EP,e4L=u(_Cr>v܊T$iusk8bCcWq@)y\#GI<*i/dcN mk%PlDYd[g K#׎EDZ%\[n PVD6Yu2! U*8RT2\'YXCjS=sj)je/zFX{Qg E!12 F0K7I /K((-Wr4*ԁ±FlSDB0w 4dz+5.h~^=6VGZ)<Wb I`\\hC>e"k|rۙ?Q=²o-x8#ҥ{.mV[KdӿeDR(vdy+@xrV^_q7",K( Az*"*ApK4iHgq׆)*tIS5T1yZ̝Fd#p_Z쿌Ց&^3|q8bСp{H_&;n֮5`/%;/75'_*F\Z8 )QTķ3l9$5~6=&ݳHːmrD܊R.3U>sDئDxl*z+̠w[xnT*An"qe΋3RPRZSa urNr2)3G5 DxրdBU#[s`$)V,"7GضQT2&zJ觹 cʺ??gx&2}ȕ".%^yUi E64M13qLG@*0 )Nǔ82^_Wde4YDфG%; A?;'켙B bMLi(x!?Ǘ5>o{*_?PKq user.jsonAk1 Jy$Yl=z)=l7C0Ʉ. 
߫5sa،>5qI$+$;sGtwpZt|uw!n\Yo2;Qs@xݹ*%!CYAUBB-#-'uY(2H6CF>{RJ=g.0I"!SVLl X(JW2A,TUǴ 5mQr V!r,|}*i*\7VOP q<.z^ퟦt|zDعü\'z睛~-<OibIz<54[2Uބ)[i>)oْr?>4!~~۲lS5AZqfl2's=ΤڬO1A1E_=C>U;a# {l,[d 66m+)ŠqEd{) 6ZBxPKbFu" meta.jsonUMO#G+9Ө?U(C=fG=hl⿧Fb]{-v8t] {\_r^ECokv+M}w!X@K(j'l(X_kbQUvmsϟxhjWPe)tREX((,(>Z>G(ݎQp;ڦ{>@XR]% :Ya9a/& w@ɪʁ0a1*UP` ^VmdMn_8#F_4{XPcH$$Af@'j2I+x/HO.^S۶ag 2Ŝ* [e ?#k'f3qV:)pDJU0fb&%L$)0(@z'MLR9ҁ(:$.Z`Ex g;d4 P#ĬCF8(97* kI ,& (gq7wQ${PRjQr`9 &N4s9юP3j_sMEGˋĺ#UB&%{<,=3:u9ţQ !ѡdi^5AYlj8+913 Ny~͂|?Y:V J7"Ҋxz(Lᡟø.tgzog}b ~ 1/vhv? }{W=ė꒮o^Zra ?:ƋaÀݷ2_;sZ82;PK\upreviews/preview.pngPNG  IHDRsRGB@IDATxyl]ʭݯ[-ABZa1c,@,c33Lc{ <cpHmk2*3ksɼ/^-U޽ܳOi[39&ҽWE@D@D@D@D@D@D@D@D@D@D@D@D@D@D@)?R?nAD@D@D@D@D@D@D@D@D@D@D@D@D@ZVfQ -ө" " " " " " " " " " " " " "J'\ktE{)mhvn.ofZ-d\hjKE 6XT{@C/)4*;0Pk)4j;_?aӑ#YlMK`;i-f %\!O'ҲK_Z^d[lH~{[?nND@D@D@D@D@D@D@D@D@D@D@D@D@l9irD+/,pOElP,AC88*ݩNLJ,H (," " " " " " " " " " " " " "))<ڰ1Uggv!+E1dE_I%#@ OD@D@D@D@D@D@D@D@D@D@D@D@D@D`U_2N[?Ff>Mo%{m]40o酚gW?;]ܔ7}>zxevV)" " " " " " " " " " " " " "*/7gkv+nC8:L}P=JRhi41",SV,6@5 5/zNۻdݟV/" " " " " " " " " " " " " "&oX8(G,ӉOg⺻foĪ>X7h6'fEhSKE{l,gᮤ$NWoLGFYQD@D@liy \fFD@D@D@D@D@D@EX4[X0JfۨW" " " -E ǪeoKa7Ff-W&kT>xߚݚB kJ\+,RgCS:e蕚(lsfSSf7 6E@D@D@D`kF{?S 7K@)ͫ7B;{={-"ʙͷ)(" " PZLg izn&xv;#ݟ.#U/ 9i٣P59hR\Wr {֕hޅ}Y VqCy៝:}-#H`T" " OzWg(CCi{e:oO  p"9X+xK1{rUiBD@D@DU &!nv)p<ͩLWk8k({~J'VV#xm'pP3i*~p>\u%" " " " " " -D`rl|,p]!Ѽ?6:gUe{Uz[w&):4RfW7}{3vzfў.tWվԇK+5V˹ƪ4x,QZD@D@D@D.tV G-m֟je{|lgk?gI0ha'uMF9鱆בIkYD9h}#f~7ь6˰0>;uw,hwJNqMPy&t3:k>}v+݄xqf*!" "PgYsk='cLV>ȉ@+wr<PJ˅" " " "4(? 
%eg o΢# Xd{m(ݜ 6N 2" " u%0VZߚ7e f(ʉ@+iV6wGXͺ`~y)On%p'p[4Y(x (-=6IF ͈@uc؛jz˴XD@D@D@D@D@D`wko3(ĕa?g62QbE@D@D@D` כqBnkD%un6G5 }Pݓ Ɨ)u#" "К&cقa"IWyӝC:}E@D@D@Dt%#޲,LH&q_}g=Z.Z'SE@D`{n=8v߀9hI76!t7V .X!כv{ ul2, R cWR']D@DL f8 mg-mLy`Íԛ@lEr}063kRSs~[ͭ~{|斊.G  v۽w[1ݗOmR`E{)mv݆9Fnl^{'}ג+R" " " "@"WhkEj4xϺ D@D ,Bh?E~ 9V 0>rk/ֺ6K$'" " " E`O/mpB6oWsv+mo_M\QD@D`S6ߞ6[e" " " " " " @ UyAFA9|S_2+J!~#WD@D@DN"~ioxI4PV~jAD@D@-KYf;D9hncPkm%VzS}D1w%0 c:`v1XS_Z)" MB`F!7]l4+eLόWVE}in !O_" " " "D˸pd[|klΒF}`_j|WoB-@ '=hܒ=?w uԩVU#" " " " " " u&0= X@yASA_3<=PCU7f'cU*(" " " M@4Xg{;OM}]*3cwî61`8gvZGSE@DU Pkf/.I^PU@ LAbUy,upT ᨌS YlbB?)@QŒSv 89g^96eo:0}TLro7nZ)XP;vȎ]w:b==ͭa͟S" "  0 'i6{(r" " " " " " "\(0ḋE.9w͢Al+Ar" " " "DHY(F|b&o/_ߘ'Fm+kt2X.g8:n,Ϳv7GĴxٗWT7w2ɓ=ŀkntEOj_H6Ŋ4t{)Ku'R`0e9h*~Ym=O(z,1B?X9E@D@D@D <2^Ҧ@ `.jçf/OګӰ^C1: kmoz.-.¥˖tr.<%QF'-v!^γ])li#" w8eh?༻8ނD@D@D@D@D@D@DLAP]0Gvu 5a!s0|uf!7nv%W B4fOcuD trUJ}bźQY%3ۭ;ɲg/•b,1Ptx3 {ɤR}N}e|כr )D@D@I`{ifA N" " " " " " MC` f) q$v3>OG&k G&g,bY. r" " " "L6sEw<2P].opLXw6bxy~O? ][sH}`mWm߱,JP,(p[E{饳`; `\#F:E@D@n!0SD1(zzfZ~ 膪[ DYd e3V8^sff '" " " &gb|vsOǿ|>g6س9?e T\ ɇx#e{? D*e֑cd'JWv#7_ү9@ZD@DM6u$l?8WрnJ;D ëkbY_D@D@D@DLΤ]n~q3wܣwɍp+0."%ZJ^)q ɔ{[/u`yha Ύa(t4FG'3F6NP" " @h+." 
" " " " " "PB >DáZq!>-MyOgh6o饒M,bˢ +K_;cy{=ו@ aH|/+xqjwUBNo [sg?mES@}jҗn\uyq@#Ne?4r;/brEND@D@D@D@D@D@4oq:0sM KT" " " "T2糴|{G{}n[{:WM޾J !/0{!LsSpT]&i~º @*-@WĂ}]')4(hQX{U |&0?et |qf30)[޺RE@D@D@D~cCȜ3O٧}8m?ַ)sv8l B{'Џ`Nİ?2^Kۙ1!&\]|2i~,Ձ*'gJ흖hϸǭPhUc)_T@C4?;}L.r?TD@D@D@D@D@D@nK`1Q\U= 3rqX(A)>c4Z8@r'mlw`>b @ C/@J>R(.Ղ3\&=VffO~7$՗@]q2jh+ o.msD@D@D@D@D@D@D`G y@`'W mfogYD@D@D@[dyJ矵?%p=xגXW/A_^?~ 3pmG,7`c{QYu}{APVS!G[{ z\33ίI &D@D@(E{ܔ#8ܝ^BhaU+" " " " " "ns>kW^m;dq:#ehg`&9h;XW^RX?󴝿9c{rlGEA 9pjsy]r<(O1LW[DJ>~+ Te@2y Y9V"0[F6utb91EMJ7|qqqaZy2P{ E@D@D@D V^^,w_a?'YvaxR崭=nk <}c]AQ ^?+f=w[f?C} K611e/_g:Y+NU&" "'C{@wvN | BP]gkaXr>d1̴G?(v5[k~CY[2@+>(P-" " '^͔[HL%Bɮ,bl7w s/!-p:K[xUND@D@DFk#v~xbo'ul7;;QX`NWJ3 DEɠvyJmHmɎ`$;˗^<b@P"89M'vOO],-Oxx_?>oemX~?z(" " "K KsNyV󑠞y?_1-8&Qy .g;bWCql7('S66Ũzv8Aū#ҷm1;?dv`,TH;>}qGwٻ;{:ˉ0OB-.-:C:NPZߜL)N`M #VD@D@v f݃|' yp@ nD@D` Bz^~/!v)L񞙇2@OW(3|U|g"0w}ȉ4 矱Kf9~2澉oxҽRHW[oo5Ypt{FKB$ӂJb*8 ]6_>W& :65S6y[vq`m.Bn[FaGl_gzCM.\ۙEg_.%oﴴg/@ D 2T0NJ قܦ~dǬ ރ TlRIQHD@D@D:]}p&]$.B3~a8.`0?@6}:4@|?x{!.pR@gZ8)i%"L_wf.Y#QXD@D@Z}߱+9~{Xs/޻=o#nɷ0d1}G;@AxsA L{k…C9!u8/🙂i<`ڣH -˜AHE{Y߾0ete]@X\ju;C\NL3+ ?ؕ&Δ}VWQѥ0VwLUn ]nX5f̾?8cۀD@D@@P?CT!P~ H \ĸ >"R}; 2ֹGUVV. ZX`(7^gN+KPP|m~h}d_N" "vqZ 2_̹ x9XC3QXD@D@Z@lAY ?3:1q̾ HR -9h>Lp&s]9 wLxFnyy_Jl-nۓe9#pڈ}Ծcwa6_.2җWhF۝Z|;-}hm9WG9)B}FeǸ21W໼PDE#ŀVGB}ʢ .=9Qwӽtlɻ#?ZF}hE`ݶZ`o{a`z#Gm;싗sD.cʄKOl#" w 4+`+Po 4ج~t:}Pe&u)" " "`5?3Ph7 Cf8r<^x/nK( " "??S3>fn^b.a&%2Ҙ'cW<&( YVY}?н~遒@xٗ}JΏ}MηZ{ڏ|j9wzqS4jCY(xB+}1-:YAhi]a 'wYglu(UUmA,O?oǺ&E<=efo/`M," "Is{Jct6t}>}ab]S}:r" " " @?Vq!p}K"1jvf`Y_pƴWXD@D`{,&r<O =+1q9iΎ-뉥˧9vνSٱu ;ჲsZeG!x,Sf@ =9ÄQ L]:2LԞ#_S]߲'"\ 3-@!=x.)"Wc0(00R5b7aqW7[^뗷~vNx$cE/6N8O=7jKr~-4=f@H:E3;.2dɎ'ơGkv×D@D@D@v1~[yD9q.1丂&\0;]l@$&Hu8E@D@ŧ<(<>ϣg:M2IkY!ṃ(Q&)PAl\l̋2 .!WʳNl13co[`d6YD@Ds>f2}wS<_u\4^ rg|,_ q nUltf졁ګY  y(^ !L9%[[) |7a^X-a̠ TE\uEfKvȺ~ٻ5I3:s?3ztAi v(p|:\. 
:Bo{+W)!phtH Ƃ%=:9{b4/L7p׎EBo*@3ہ}qn6і!R>we ['cTX3fCW}ԇYdf 8nV$b,LC`Y$155둿Wch)"  j P?r3=CΕ\/3:Ӣ$Bv9NƬ=>sm7U; %69%0?1P ,o+-0O9rgۇc4%5e6;ޭ+rCP6ώ<3m#ʉԛ@]&_oE]M4\7ύmXƄf$G~MFә,3DgFn| \Gt{ _g FiN" "pp<_Zz嫮uJ mC4$07uzKZ%B.ZϱKT$ޒNZ>gt?µ˥M|qkisA-*V߷;!}6j~3WaqkH4,:Khkn)9 E&:6{7/+?^S}v+_<=bqqf S w}η]Ñ"^糍xIac{?l(*kp3 q oy#B%mP|dv9M\"_x+Cw])(" @Gspiy!>%4g;](`㣙Zi]cV!kkK[7.^ U%" " ;C ;(_\2B$' p|YHdru w=jKKNOaop!4Eס\WGyCZ-qtPh+3y]YڸŽ4,E@D@D s;el 0#FoBcC u"ay:k [z9;{p "A=^9!}ǵXX/Q+|iWETz.a)Xuܼ9+yA -Ǫ -{3KoT'CXϼ6k?{悳j%JXL7K֗3s_嶄^3y>7HME@D@D`аHSQLᾛS ~Yxŷ5 B/2@Jte(48Q'h nF^;pwfvpkֆ|kƧ:)"͸Y :#00kOk5i(TP0VSNJ^^R V8a8* qe~4 b>77 Ơ YX52 uGcǑ(fkZCʭռl|O{Q]ȠOaM0W5i˳cɒ0lNwW’qۗLrޅI;3wPz{1U@D@D@D`{k:R҂kM/?c-Woٟ ^|",@xiH fݸ2w?8I-CWg+ *@X%?C ỰKgC,D_ }zP9J3.[DiЈH6A, = cݩrrK_„8[բ]F`/k<^^DiBv%mJ[&Щ.i(S4P4uOhC֨6T׎{b[(')ѓzE'NDTf"V qIe]E쀦V'c04$F  %(MkxƲHux|l_3:>\Tdu'Wg쩉)^|ݕh}'Ou NeyhKؽWE=edwυ2su*2(ձy}+賵Pc1'wU0RAq52 @EL l(o?&<2Փ{S `{nHr?_xf~l]MG7Ԁl3f4OAo [w!DzlOLlK@H*ݾC>ʟW&\&~7K_.|֑ 8^~ё ^FE-K`~<\M!pePmg&&?-Kx/" " K @IDATz[\ wt'5gp#/vQHrM3,ԇPY˭"PW>ݵr(bq69l /s{!;pf}xm~Q557h n$q ft{;+;;yTYxv wm÷j!ޮE@D@D@v9}_se{:7&xHGu8bRHll^ U._}9hO^ؘ"|su|]C`O ~C:C8S8Yrٚ޾t},k8g O $h};,`9A *9u[mokus(WbQX!Gym\5+\ke71o ®.Ds[J+嗭ǵSJ1n354 0~k7=6ĉn۽}Xl4/ǩE+'" "@Y8 eb&7UA'Ƃ_/ka ;akaĒ pKG# 9`;=`|IxlvXqϘ \çWWs%j//" w*>[j|qLKB-Lc>6Ԭ{oYs uL``~@11ڛq Xvܼ^Zpn 3 ]nƸ?Եd}geGQVBȕvqէ]ۗMt&gNOg"X~ '.@]d[.շtYo빕lfbv1{k3ƙ{mo9q:4fa'*"L 4ą 3AGĊPԙ^䋀&𑏼g[T*XS ܇1[1䏔Tg{*2|Zk Tёܴ<a 싰黪cAz];5T3#[g-["@"_D WV`:oL9b 3nƒ/s[>X;p@,Ewo0(Q|]x<Ι%bHaV[f/C9`1/fy(ԐyC|\9>wKZ~8Ic5dLf фv-w`Y» n%"яg[#BV Kdmf._<$Yq֊ X*0⸈q<.V8*G/nW-qh: 5]қ{@E+ksOߜw #6#("VP|,[X\[ ,x>~]h"$:ea@ND@D@D@E$u,+&rdzƢXqW}]Έ@Ϩ` J1+ u.arVjE@D`řk=;V}A]΃0ui8Pmʧ?:O[qKPn{,ݞr"ze2j#ny=Gj;b |{8DrfϞ9N._4X <>j6](2-)- |)ݍmVq{G7}f C?wUXg %sSOǧ68^樂" "4?APłY(ڢMx̢nUIoT޷w} *@WX%a1F!:~(R;-.W}O@?#ϚuC!.(盨d6W[;qK҃ LsfD:cxHElޤG걾Tu07/ҤiD3 ШNo&wCL]`=n4sx!>_Gq*{gg_{S>i=M*x~ *^" L[.l3 ^l p[cf ]<sE ?]>#s2L +!!SlyiB`ykTXD@v<~c@b _!gP0bP:\wѳ9-O,o} w'd o5;{mphf8@)[Xrr*] R@3ct+ij<#x8}m=][a} ڲkj a Ҡ;So2e_{jլ-_Q" "x`NDY4se>Lf͈ 
t^{{Gy]p#R!Mx:E@D@D@6A`VSC2EkW^]Gȷ{x cAV3<{8m6 }u$-?w1v }&2>`P1OA V)n`҅"B0&7-~R2D@D@D@8?l=syp*c9s )w#(؋ 7.r3݉]Fs%?;#ȍB;e2i5,;d?oso h%4XwEscHO_oV]F/U_jilӲǤENvD/d^#9dS^׃@yD@D@D@v iqI]x֐/3u:B<@_3;YM7!(F>ױb>eЌ|h:)}+|.2|%}^#Oh#Oa8eyͲ8 jC[fغZ4)'" " "7ڑ#Pl&BsːXm^pY|pC18ʢ.aKa sC( zϲ# .i}~ؒ[%;YXw~u}PR@} ;žr" "Ђ x9q0k6otDK{(wP'X/Aqй!hXRNfBr5)g>gx L| {ᒳ grrXV\ϱ0!{ HpqL>yQ6zڡ@e ,ٵV.`!{p0m k46=D@DIdj1 QHEomõ ̨pdpK[Vc'W7&/"DHA!tMJ|Mg6y>땹qLudEHcNrPKENCߓ3; Uw_hE<]%i8#+_4y쳲l_]ס$W xήSVoa}RO:^ ^t[ݭ'#P ;5w>oan''" " ;?hmiiz s:*.-wu;np٩ɋȋ35)F​EA/2.#Jy_m}ݮzZJ M 0bWѸһt؛3WrXi?cCnQl,LĵT>8k Jh2νFT&<G}mž` YDHeyͱ={@8t‹DEZ6fEe6SѸOw +i3-b$y5cP Hk_iY'%g/pG=n,C(W yZ}l\~cVi@i|쵾rWZǔNͻak4E/G?;um#ЌUD@D@=ǟ|mu-Μ=w)P>c1rq*!̡FECkubqR4^b2Q\B>?Cr>B_l"M$uci'Nt?yMf<=f_ZE[h!|7ZiH%h˭Wncɀ gt|a?.ǹhBS{QG7 ē=RFq" "  ЅÕk}3mJ-.A2 1e 4fdZ'#[+#[130+}e+W+uQND@x~vاg}kvϼc^f eu(Ņ4.:^iTセ|3(o{{ҕ+k\_}\x]PF" "p{?QKgM øM,.ć8F{AQﲢI8~W޵ëƹ桽{0(*΄̩^{ijrF=C{NOrw-֕."(@ݲRV<#:1abc*Iltgf헿;fiXJND@@D6fpVҥ:@ft!2yOYWN_9kO5-pB{ yHUjRP:>l6z °:j/˕TGQb4qRdY(t?suEդ+P79a|9*`@w=P"o=gV9 WCW6⊢ Y{2\9c#ŹQֱnSX߷0GnC+^]l^ObKwAú/'" " /_:%UR._*CXBz ȋ\xu}+Xe|+qm%i?.w?Iݣ,w#{2PyO9؂}H}PB@@Zph 8ŵJ;}tܫٿ#m?Kzdξr#ڦo2uUt*6_J 2JΘPgoҪ㫯˳U*GVc"#ptf Ee ] =0}i\ˉ@ p;iG\T7>#* ynMDűLȋyA@BQ8wLaȷL8vH7>X;?fM# %{zouZ̦oUE@D@fPrWbeX}&b9A>|77lP pdJy YW;^4:'YA)\=QVwY~jYN+L kv'oﶷCr4u-*g.owuSyD@D`ӉrKxV&~Vj<|CD;-eu\..OֹWדӴ!r" " "hW^t %+k叧1Er6E"!~UC9(>`25 Dھ^,x,Ma:X7IxU=nyşq5x$Fu3&WkT/?X}+>*|[xmȗ.a+BnqG.k9 Z/}ܹfnPpN(/}lNF, | >[SbA[ Fl>z_A껹%HO %(d«|+žv_WK@7YެH~7`9z&]K/軺i-K9;>pqg=,K22]7R;[^HjI X>`aV e?A FBe[jUJgx72OT'9yqB߬f 迚8#!мnaJ:Qia؜B E)"6"%(e#t.ƏX o=9,8Xvc!@kp.x8M@AsVHJ!).سԟk\>Bi p㞛&shSlV"?镞5Tѿk%^8x#8zx/|k~GTTV;}@Ab/Cjd2.: _xYXbRQ)=(~EK\vm5.:1&9x`884)}j[oKY<8Xu~M® z!y#K2|a?Y!=.<.n_/{{\#8#pؾ_EюJ\_ZϣJ JEm7'ZndJڞX0)? 
8{7 lB#\(L2yx8sVg-FdeH+h*HV1Yb3.ط 9L/< N >SWX= )B&zrP熕4_Ɲ0_FFGp6 {_Vg%w4ߋk3j{g7_>Ґ2O–g1idHdtp2B֎gd2`x9FE?C?wiwlI _#,+@St.!ngdxzNpJPETks \6W1z`sa?OVvnksGX&j`o&:X\h).vb#S򄺤~T|5qHN|`F%EG{lJÒs&GpD`*xQK)z38cQ)83 -OJBVT8K 0A5uh,jRXQsrwi;9vB9#ihjn_ϭ:#H}hGެo}Қ?@ t$t2֓nBT|p|ze szsOYcr8 9̋^NjCS9yfVwY|v:#pZcJ7W':bf&)0 L5(q/(yKRӟg9#8Csuc;ǐ&ha-2A3ڽzON-(2 ,ಎ#X Ѧr,/_P`I5`bIwG>31-t|t N`] >ox8#!10OD'&ݗ#f K ~WD#ь[*k| ʆ20n<&XVyNՊ )D߯<8#Tܚ=(|Mu}W'㜥r"8;v>9#8&G[Ww[5$f%w4ߛ :% d$ (CTco0 jd[Eø/V+7u"{eX3XArOFyofG`ifr9|L,|]X0*|&/uKU($Q0F ?]rGp5'#:{jifaEKGgB^Iq!cWJ)njtc q 5mGyJG0Ȁmu(p#6wx~dc =!h9PFcc&6XZl"7biءXd[|,`1^o'HaE9ilPca`Eqr#8@# cJ_]Z0zH|GktQ '@Pr#8)*$9lĩ x\fָz}zA0 c߾0$:h)y"0?!4`o^+FN^~VquuaBٔz|~0u&h^-RGpG`3 Ѐ~ _{]t{6icl8m點BPJuX+ۅ;: pvXv*?CiG 06VCFC$"2JOʌL/ՅDL'z$0۳/ToKTL&߉c ̿nPS%X~9gTZ8FUxAJGp6=M{Hn} zIԝ֣IRc!8X+Џ'|:ѓtضT/_jkPVr@Y[.tY<389OIM?'7mmqa#6^Ns=E+0lnDVm#aye?;\r#8#@/5MaBFr^eVnH41 aqdi([Y)0((Ǽ@QpDH0b ƽ{.ǿG E` gU$ ᙦy>s'%ZP|@xȩ|$ȪBEH!PCISg2bS<84<_S2WE0u8@$2W=F:g_o:{AHl75ȍpsj 9#8+@U v=1lid *fѓS0Adl]>/g ZlT~̺S8 8> 8{bp67Sf Ɨ,"1i2+C>7f|,` -HZ CGc˳%3LUVD0//{³ pOs1GpG`# cV?M9x|_%?21Eg^/82E4[ic``Ѵ˥_ 7 3υ{gÆ[@:JzˍO˷M5cq51ͯvql)xpG`C-=M홉Y/ q=Bu7CsVh適쑾rk ':8#l6/쁁'(<EE: 4E|FY$Գl>NVU(<l*=vZDC mF*8i.%{dтsVyX3U^+6|{HQQ/3K *lq-=puJ]kѱp֍qGpG`!p~ g951Nx k/$I0$>OcCnziGTc˷6RGpG(Kۧ/2;8fӱ-="K#V13,Q1zI4kq$@]D|q\~wi݂X p6/S}x4>JBA yҳ1_"g^AU`RIY)+wFGqʼnfpj}1|_>=X,Xblk^Z"d8#hh71xxcNʯW^g{hGབ^DS/q>aZMByzP@L!+?\O?V ,[ڗz(;|y/sxtR~uK.Y([TGXM6{on,60sTN^|3bY/W,KD.ڙXB nmoF8#8K@=;JyK `1t[l@YLz bhZKS+_EBcXt`eErX_cgF?8Ygg`dGJ\QZ'IyRA7"s"u:K|-J8mg 9-+EK-0aF]iۍn|E+"x qGpG`(w+X?ֈI9q>}FΤq dtt ܒ5Z*K_TDxnBh?=x_|e'z5[|ʃ}ljitGl+rĨTj~Ny'52&"*d̈LbarQX}V3/VF槱;04$""'i"XjT9&A>J³A4{OCJME(TcEJGɔ( x.85>mI+Ზop.p/rw"/^@‹Gp͇@mm\|,t@˅SSN8Ύc_G=F?l*'g3qlt;'d.S6WUvs;Ƙ+6bd<,b[ܴFq"'?s213'|lD5&(OϹXL;F 5)dwGLU֧;'ov\$@IDAT}@qEg+' ߴE8pGp:LH40\[ȾVy%s A cbҩa igO2!F"%I U)O(жLڠ=-CFE VtZh +6~nhR~^҅EG`c"vۯX<=c*AQx0HsgTiUG.%!L#8Fvh1Rz&5r1MEhJ-iYI0PъŪBfBE*PPδxYq\dc͚>x8>P4O#gfFp{X>;ps}4Z\Qr0f:L"ZG-ϘAu&tЛJDtA<QʝnB5Xsț_#/pGpG`m,C8vĶ 9^ 3cJhiUvc1ywBzQ{gԺfcG`C 
*˥' 3p32ɏZ؍tɏ*0l䠼8'խn9tq#8#pc0vOgΘҡdY5~A23LTC:ۥSd/WBZ٘"-!y>4Di+`oz)3 R^ȴ O8G`m >G-?IihȄ66/P` o*x)N <kcazt^13%B;¾S%Pp#8#,V/x875;j׵X0LFG#wƗR#p:`l]Ůh Ͻh&ohĄY.0F2-U_%x|`L.Y 9|v7D Gp@ -{ tE})=~P&>bIut/<ʤ@|DTdujPAOFB4/<^0%N5؀,8i{6gpN>|^O,Gbϳ cqC8Oyϱn([B._dQ)!G>HU{wC%n/GpG4VPMưwo߸E7< ?;/D<3yV8hG`!0 &uv!E瘅010Uj٤M0c8m0LL?&oC#8#4mnuXZta)9Fa|XDd,/Ɠyc' Jem7C @3X#`?gS8M##qspU)-<7̰ot}FZY,t1T6tZe72.@:LTT2yg;$fqg>{WFJwuK[^ =8#8#l^F& B~f :84!H١A*W#>ݠk4D,s^wHF a } XX˖lGpG`9غ?ѪF" 8Nd YRӪy*+|Dd, #iMQ4(mՖ_lVoH76;zC`.xn0g>k] otƖ&i+⼥cyq]6 x0.FcX%g 7c}[$ڟPi#8#;/%EBD k]^-s]z93h|ܩQoEfV!POCfcJy`<0̉%yQ -]*x|@{(x8#8+@ ݵtmqP.Oa^ :i%"OYYf<11Kh6T/riL. |A$k_3%$#BL*Gt 4=F["g>z<8k {>zVА6i{YӔ7H Ҧ?n&A5ĩ@ˡca?+4v?ۚg?jpGpB`S-n"*X_N~ViK ^.(<|!#g䞁 \]#, 1Z7hX910YzC=៘`c/Fa|5";#8r#о[xg&mi1=bl԰o¬߄RkKcTČkNޘ :<yѧQa N׋ Xߎ#E};yÇ>=X6 `M[ZZq7)E( ";v v#ꛜQd+D\r .~dW9~-t31vL"Id#XpH @/ǸxhHH/4y8=0!=3>3|(-sTgC^"mQ>#"$VftgJx =ݜ sXca,>=+E{Vyug8#8!.ߊe*WMRJZ=%?XyB~3`HGxy6@'ff9l*cKnN|H0wUdO;#8J#кFbx»[#{[IŖ71']$42"bio0mR| PI ti*Hu0+_s(  I <*3"1")k}n^G9#Mn`oo)LY&gzXlb!h=1=Њ,z@dzpigxM=gXBY79[[ =8#8 "ivxWY%M[״;6JCtf}_5jG`mpD j,q @zaÜG$=`cr6/?o{8;#8k ΨI$1t֠YEL8|ZfCVT&$T'B-U 8JR5REQT,F /1x^1:t>]/2`4>G`uD#ޞ =nwm}P|'"kl2rzƘdFcZ! 
n#MقՓrz,ᄊ8̞vfA9/tGpUF}@6Ov8iЙ(QԖ^M&N |D2̨ Q)T}^#k^OD󝾐WB\i-jyV I'Wg2 Xx){i1tRV" ~PNe"qeԩ :LVi =a5$ <@*5|zHxadB=xI{=8#8#j7co1ށ]nS/.kퟜO>7(X<67(xBG`!qx{#_~C\('2=?4YUYr;j|@ Cr4;wU-u48#8DJuga b@c|@ceӥ=:!,o a:42-^Um2'zc|FgX} b$qz'A<8ˊσ޶Bt/>mL$ce}_D^L1 $10?K#4_B^PKۣ:s^д)\h^ : WcGpGp~msY[!|yfRcOD=?(%\so&ZF` uk[]\00Yzzพ\Yսի}hjFɌvMmpepGp\P =8MTjHZm*¨Mb,-/Xf٘ FSKq:e,Lʜ4#]>DE;"'=8 0;[(y (a҈x Q^nr#gEZoy~&G䩫 Eh(۷HȎ@=X޶*}Rɿ#8#~kG,`G5;j׶K;j #9 ˇ}fMXf Xh@U<2Cs״[/¹GpG`!؁mIuf jb>2Y1%-%r*OAQ\JȊL`WyHM⸞l:ˣPaAY1ԝʄZ}=~ at3E#Xd9cfܿGޗjlgL=l}ˏC:țAzb*CCZh:~,m17m/f˂ӋL/=c` $H|R])8!>Q`x8#8kﻅmZl*o_Y4\z&fwARV^,6XȤɃO¹4Ŧ0n<)>GLi!0VwH36 xpGpG`!騨&F\v7Tʯ_,?{ Ӄ_{G?_۰^De=pctff6=1 ry?v;c׃GpGES\W0g˲ OzYh}Pq:VNz(KU O7C\,f2Z"3yU6ׯO Xy(cJ J 5 nZH%56Xh]d)PUF|0za]Ǔ'`p%!߳i޳TC:F*v+ rԥ ɨ~cR>W<_aګq#Kؗ9RDuks_8J253#/iFNGpGX}߳GN\:)v*"^6ɳCS#9O/'Ǧ叟kCeO?W)D'0xcj%,ˬHul\gR349mSGzTV9霜!iniFii. PkpGp6>DF^a3Z=Y\ v2Ny'|1IdNT,|QCN1$|iReL%ߋmod5)i cq91IO&e,G&܄s@sYƇe䇏ST~SYuu\r^Xc8#82#Ps7JB5X r)_?Lm{͝uap\-pE{aLGOGg07u7R$v=1"]`19<ȸEz",*rMB'ddfk*7vm믻Bn馫&y78#8#)b$ h$-؊q|B1-Ө߈luz,oq&k ϐ1c/Yڒ֓mg.^KUhNKiwLU]X :m9@~=;}eWzQ&[a|J\Ie%zC_VFN&{3$F'%9 zq13r0RGG|΋;3e֏` L \鳓2Ay1?[fK[[oAlڃ#8#8_aq@dnFɡŽj5b}ʕD:LJֺu=;!;Th-agp'NVSvbQygHԄT+9lA{y~=b[n^^ʗmX@׉GpG`a|bӉ(_T4ɥ'dtlN^iG>PN=< KZ n7Z*óbI9<ޕ 4 .N߅Ek52ɞa){P_S<W]2|2…'zx¯|ٹGpG8k|MS}Ʊ'^ӋMW5_Ol_$[K]KmqV>۞L' VεPcxF R?f\T|_`=2Gw,4rAy{Xѿ MpR#RG٫;ROCxQ6kx'3ypGXN?)2>@HHӋ%EAWHQi2v / 65YOr4 ExHN]BQE|L?c|L7G4?fΔKXSy^9G`s!0utfހv2ς@OJIg='Nɮ}JDf6sΚygUb՞RV5r[H*YK럭e"jIsNꢵش<5(Oa<|̈R-5Rqjg&&d'eťyR㵷ɫ^}477bu#8#8"  q/{pW~$T C؝=;&F`m£N}P֪<1J}|dNM7DB[~ܓc]C&9FaLĤ ZkdKSNzD_>f9#9[{3]@hniԅ/E#8#pMya>.q8`Rͯ?.ҩ<^`_7훧Fu0^VOK&O|Aݙ2f5@QN,qCRͽ]ysBoG`3#0e\}ؕOHܿXh\ó6f">t׿&tP@LyGU:-DMLy>eA 7XG#Y4b'OO(w˻w|hz/zH+'l19+Ѯ-dk2V,zs'AU |dnЁtJ#B{d^hOਚ#AR#~7%蛝KdHg(0䙴@9C-?[V*O>rމrm^TD0<='~(ٵ6]+wc{屓20~-ãRȯ_! 
(_?5(dbK>ˤGi[z}@џ]a"|g{8\/otb^O; #dqbsGO jŭm=68% U-rY>33C2<`nFwooEvvGPb2Ʀ7tC'k4%gC&N{ǟ[ʅ߷ q7_*חS#8#p^Lc!ɇa}kw<+/"Z43H+!bjRA|&-6iHt1=(֡r/c]i=QYe_F-kUtfEoEIXÆc޺jv >@LbLus{wܴ3r^9&$џjNkxSb…Xe39AC¥7c!44,oP;pG`s"Gdiwtnjr03j/esgOGsߟmr[Gf%{^c%J8ZC{p=Wv{Fi^dŎxwf1sҼ/6[8xj`ep_;3Kd>zy\RbK3"~G~GΨ+Du|lD|p߅>16%z[95@ whYnm|k?i2N v='2z7ɞ3V2%򖷼Jኒ|^8#\(h yaxB>[kw܅jи| l4-bK{x0:9ry&ПZ -X:".G0XFq"s'tP17N==cX$obwx E` ÔZw#ͼkL^@N?%_{KNPrG[pZti5DX{Ʀf-t@vHqLest^L~̙̌~,M/ZW?qTT퉡dGpG`m"Ќ]lzS$_eo[w4sFd`lZ /XJ Upggy$f2,ϦzX@d"AI0o|Qt,>QGJ@-> qiP ']vrLЮ"4Q "76AC[ك V+# j̆4\Yrv4\NLtN!i F.GH]_f6 2ΜIEMvi{-2cT,X׃*c_*b:ٟ 80O=łMƻ16ސ(bFD d2b:R~0YY(M~ u A jN$ 4ӳ/$46 sؽUF}1^n|Ow _ ?U8JHM0."4 Zp(B絮P>U?i1?!O,[t9Fn0 Etp@0j\x-?<=()Fqӷ4UE[;ud,WlX"KO_go/+^RɷF^8#-_99$b< m=螭ΘQɁyryӎu˅F+U_~_,ΪQ?[kHRza]ȢJN(<UjW{쬯_Y雔;d)O[:C{dw1vb\<ɰ(A?gJQ#v`P߅-50(р5D&rKg0NTdh9/Jf)Ȑ˰ H-7Oa?  |E^k+m߈3-YGpG8'pN0v"$>jɈ%}f)? ,f^BDy!-NNY%yM_SS= 㠧Ʀz4/-F#Que*C>Ȕ?0lТatNlmgI||1_ {[ 1.F~Ò;#U 8Iwǚ(>L; ԟB' YfVBƬd ۮ^l=0iyw`ӯ:x۵Vv])w}yUO%xh~g'" %o%&άƛHI9 'NK'&?b 宻~={vO+klt.% s#8xLK|!N穙1eF2N${}\VC`*6X6&4_+KOeedcި0v*3:N%'1KLR*ZKʯ@ }$lgVgOKey;XW+i;=vЭ2H}a'?:[ g-X*^IYOi,gy- Dm!!|r"`zK]$)R't9 E3$F<%&8Z+{;뤱vNIG60FĿ/~r0h9b{a^KwHy[$k \K:w)[_*xǻ[BHp,{~}!VcOGp,_?5,a^oa?= s/moWt6a/vWsN簁XiBV䷮5A(CX[M蚙~tzV]CXSx_~2/ {iZnY^y3VhF{kp=SRZ)^.m&_/8 905E5.:1&c`v Ҽw ˉ)rֲvus}IcS khaqψ|G/ʣҨxKS\y]i5[:`30'-H:BvHUɛ _6T-+Q.eviShc/uz.xڵ6Vg&#8#FyZd?8,䳚w'yKG݁ )Ӕ5y!(yKc1yZg,ڗRGJv@6Lu1oW>V>bt oͲ_|=669.(CŏU hyx p`&Xy- ,Sz%4n,TM$) dB;QJ3 @-B%,+[A܊uq (c/}VùeVnlSp]ثc&ik$?WB'pVcd)I^O"@ЈiLIxt!wij?*a㇎q/}-ɝx|˵sGpǟ9# mu Ob90_Uu 6UKU7Փ'Xn‹%8JR?Xm%Qu_K&sr՗ȍ7]u~ Yt ,?Zn݅F꽍rZadȔ|Tt9 {I|}_~s _z1́o=EAj+œc[}N>qrjpL;&OA5᥿6tbi. 
'I|4 (0y2LKEh@`y4R#Q"m/Nvɞ^ΡB**q~-ΕdWSSr!-O>44c!<GpG@Bȑȿ8HӅ5|q;dXyNM[Jo唵F tļtz]B:m'xqɥ ϴ`4zpYF%̄?;'0'UlT,_@` ym|o$caCbg> cda778fi[@| c$&L%!)\V ; 1"pDW)9,(Iwx㎽Q|J;1(\$5rViX]EӅz( &cd˿T~ MA!D^WHVQS#MW^"-7\)18rRcr){*,(PUQ1N{i3ӱE;%;,wq2}_pnGswu@5n۪ʖj9pN Lz'dg}YnM=O1M:=><^?1?`T kOK[ulK VMG#8&LL^rIt|  ҐgN' +gq̧餔rP/`| eJ6iz.c'evbe0b,BGU_n]?o~N0[΃#8#!AAf&DF;^Фd|߆ qBȵ8UR?p&tiLچo)cHIɇj 7>iyicԣKVctD2+glP#҅RQ 3||V/,okbҧFC5_F% =>s=`c1Feɲ-YTRIUʬ|}ܸ/_fě|7"=z㞗*7SX=kcV `+I{'6ob 81SNɭxϗv tS#6E `rh~r._ޠoY!,h> zpua-҃/Y.%lxw\^;*cy3[|<0^>D3R=G#vmk[<~Y/&{O/8x';n(m(԰ NF6@:ab&)D Il +k*`@G*+P-ti5'R   -:կsru['O~x]q@@@@)g65´X OŴY%VZO.+ŧ2Hu{:WRKf"IϦJ~pYk&2>K:>iG3nqp\dCB7y_DzyP>Q]ФlR<0GR<L9d\SxI^pHOFy;C 4˴Uiđq 2:x@#Ҥ^A̅LMȯq|i_LUOd܉[Vg|\걖3O"9Տ4%~\N!,w.=oG8&;xL汹DoԱ0Oʣ9)9;[z\^~ytoxUSE====n<Ќ믟~xm%6O_۹T?|N?tZ+«K~` 774i)oSUuUx n'iǍy#*GX@efBl5oRZ3l]zq\hO<"DL"Kޔd̗`US\LYMzGEi#"q JiUc %2bQ3`1Hև]&%a3 sXH'(19\k;z`ryҒS#SVl~H]EڴeI &(=]Įe@w)YȏL?#-#=*~;J) +Jl#n~ߺCF `nfFqdW*N~z$b}+wF~O<<;wafddd[4cc R|y$Ҵu(@G bQh KtI*0ӎyv8tdWO.KAA :WF}Kܠ^Q|kgesOi{H[7TWMGl=bIL"4e~w16H5”ccު0ZoS&A 9nD+x y?mҽH====&#Ͽ$Ju#,sOsCc.|Kk~E]O5+ 1,%Kt*eo;uKT/DeNQi;;esSXxM=AN _G^&[kԬ>_MJ{[+k-ﺱSt'&!t%sD>1ؤ^ܑ9P^N4PM>hY},%v8?$MP\BKB1Au[dd!kj6 G猯uk(al,c;O~w5{ { { {`]ymhbY`immd9"j-*< 'Q5]o"p2.GpuZ,8QJ)-uPmwHhq.ˆEÂi%\t&gIekz p{ ^ 0uxB+:>| -w@x 4+>Ƙ<ƯSIW.(Od|e,,M6:㭓B⼴JOg|o~U^:1 OYЭ߉j >,kR 6dA%r2J% MVڕʌhhjs2{ FzmC z|L^}tv yAL֨fN֧&K edvN~Oǿ!)24,^Br7Xh!c9~Bo}=k] `ѫX iT6ݽN Y~modW)O "Džgf\)LxJڮɰ$owE'ۅᆞVپW_ Lp^Q:`N'M@aƜ;'4A{0r `0|_ ELvajC{̸BW#y3vOKc7&AR'3蚺ŻIS^_~OO>xC@@@@z@x |2 "ETg[FHUaL8}Q!|gf)aE@7D.i.OAPԏJHK+TʍO=CIy,>x 60fnFQ'yYޜ>͏ 5cicv~{ \LƞwrL_m {U-+Y9-l{<39x%R]__ɩ9y(І0i{p|:vXM)Njg44Q?̋̇` &F  $ L!J 5FJmF0li6Ɖx%@K&,\,'O_Wy C@@@Xg34ĥl&Jrdإ쬜gt_Є{;Kmܾ]as l_NkLvn`~a]{ &ލ9t;3.||1,s /@CGWc Mȏ?exz%Oi<@܂O0 Љ3b bf)0!;7.Q.yQϴ2&0)^C5:L#8V Ўo fQ;p{q?0N@6i49=====f=qp׎x4FZc"aG܃'*UQ&Aq *4R%Av F-t5m`:'6##k2%S4 bhij =(&~v4҉-2dYr=pE=07 }hN0aB#<|?~(}%x5]7Z[N2=^ N+bmS^v1z~kG1y=uP~_g)N<| Ɔ"܉wɻ{mi/ija^Ǒ- "[5Y :pK j1րg072 Wn7 -D騗ijdZ48pT~73crX؀8G\Bx}d 7˶KY=?[y:Vaw{ 
_`ve<"n+n(!{^~90*3v4^ 1(6-]ɓ2mW䠎9!_zL= +uSY.ǵCǡ<"މ4bx:EqAB QХD#A~_L'i@ɔIm\r,I%jo#;_|ZJr^HI jƮLLz4i>5x;cOw"W|X7hGN؈[s \M@}fUtīl4m5e;clN\#@icڰv :j҂#n2R [^C4tyy%E A-YHrrr)țI->mҏp˥ C@]UUvuwNPY:u>M/`Y#TQr=n\[cLb̹dׅםBcfsҋ,/&زA7,ȻRVr%r1?U6L- D,W4aTJm!I ,xY巻Dc=J=)-7zL\?&G>L;mR^~8"ɓg!/XCE<orF>sT/?-g{e$hk[7o"7߼]v.z!k%tvgS}obmo eMTQZ~*oXlAO;9ے~ZO;7-_?;) l`8'`OܿM~qg\Ұܒz<0f˫dUT[`~zRNÓmj )b 00&܀Of0 VZ&)3Dqq&D(.4%CDՠz2^Bɋvo@[mrÏ}x&?0rCPm@ O@]T5:!'.69ZJ n A_O8\'' ^\n$5zh8s*(4ڟ4H !#Ss+qHrhQ0dFkjIwaq/Ti0`Ҏ-rLhz`NPmģܜPuc)=,^{ߝ$O93[g XxYĎEK-&"R4-dU7_|UBKwAFJDSZЙD4͊g a?hmSȤ^Yz,;~rOV*p:JY| /'}ib$=====pfq[{"?7,_== BRM;o[vJ;ڟvMNGxNfnjyW{;6j[n\ma?n.&ʆ?@PMrh˻c㒍(*u<=Qԏ8:{WƬl}x`ʱ3}Svj 2N#ge|NpčIp߭xF1?ѼE( NIO3.&EW^TvUG:&\S7/@K9/0y_&q\FCch6<̬LLLɻ}h#e'G vXT66ˬy3xl9֢;% G!V~3 $AJ$r?y"ղ$ !alēHKʧkMe,?3 6Z!(oOKSCf;Ӡb ZB LMrlf={4vw@#i_&XFW)dn8"evVME9pz$ enXO>ۏ3pW@sJ~'IfryW5xb:1C [m5٪Wh ~wS^yl2f3b2y*^mb;DEw!=m(09=====ЁM L˝XjZ都A*ϣj;W!QiZ}Muݲ!)͛zS/ɣ>6mn aMO؅OA-|ktnl. ޼CvzŴ5=@ԂZ4_[]ԑ====~]?gL`"Ҍ:sXii̟*Yϵ[=օ~Jp{i:1K8/Qpç|#'bNqOʗS[*:)a~%U҄o9JPc~}kܹ#z<4y|,Fz#? &u0(Ƙ0͗N}!C&m`N޴UfdL_24\oī[d~FI}-ލ7]lk32n^d k`zN]'c38mwil fHۤzluhVPvCn)mҼa>*Ã#կ>+CC#r?N+oy=RruzN<_ߠd~l~ݲdFfTA]IQ~n)y= O[]ǮED\fDG3ry;M\4I4N/^4}2s%P֍ zL 3 "s"7F(gy U#!p@ę~Մۢ#R#U)eFD%ѹlkxO<sdy [L=)Hnl(w>ϙ5޶;Eg6dVʬyIi4NgciM! rS(s M,Gjz}=+8aj246*՘W&\oAHw7.`VMQp7ZW2 '2!O U~Nb1 @.d+eFDɆPTHdxa$;Jb,|16MւSt滿=$'֌޹YR*[O˶P(6Ce`.|nX656ufIejJz1 k?rG{<"^PԙЮ[NpXC ʺ|xWqՠɠX[pI> kqL]xb+)ϯ'Ԑ~U_yUM{_)_#'Ɇx<6IlxZwv}5i~4⸢k9&؉J>nÏFJH;]&R~OG~ XJNa1aa$L3#eOfW3h[mGev" pyl)qLɇYl"Y }ߛ79dddd34]cge[ya/a]*I@pͲ1On<ږ"Nd9PqȌ|BSޔ6MyE>Gg(WD@ްISN&ٓѡ?#d STБt0s~x-@+>؈p"@+YyuPX)0||3y^ϽzuYuF/㘶y2 `-(þ+Գ0p\30]/#dáץgns:&:v@Ϟ'Ɔaz ?OK[1'F_-uMz9waIwpm|hqݞ8o&SG9 x$<C)‡ļG8+6V,id1=rZ&z* bl$>ݧh֖qSr?6x38)=r '^%ֳ؞Xf ־-BÓT4F۠=r~=}k[o7f\6o(O~ kl ]}kG9aBj[GhOӍv{5h]rt@g’2Z@[C -198b|-#s;:GkO`o=Zaܠ~9<^8+G :dvpH|Sҍ=x/޵.~vB*wwxGbrNDAIa试O(`D<;oJf)W*?)FH `yHE7*p&?-?YI7 \xtFܼOJB{;nR'MV*1_։}[ @ yRhݴ%@<sq@Bqp[|.:8qI' QAf;)OXlTe%(MKQHցC\ ާB&S5-'? 
ܤ9H NLi+z07yl)?Ҭp֤Y!VY'p;l|1oPz1=>1MoR=Sr`OxWON7kfFVpي L9.~ޅXt2Na糘].pSFaA]*%ش&XuD) jI2?)=5)B`iW,+oUZtߠBgok6IǙ'rm;֑:ok|r\Z_Mk+<R[?GRA9n zw P((R>D\ה$hU_Rƨw oJ)68Ҩ*?tC)%.Ɠ(3<2s wbғnw >pIoSĨ`^@aY ~[zlM%<y0yMCoj r~J0*HK٧ʋOWyRvN*8]҃Y๕ײ~TZoٱW@ /wQ8} $mcj*@QvC+Ō6KOզ 7}YS7(ה=f xbSN(o <79ddd={S:<#;%>.ﺣgyhW8CҺ^ڙ1憁M_+W){L%??$|l07=-3X,7ΆFw7_KW\oESQg$Fw)8!'}rlbhCxlR|pکiySU9w؍ 8+xJ^+kHlhn)qRAi !ѐN_6R) j`ٿK'8(sۢtcB>qot{$؇<^i 6r;oe2zLpa3LANQp40qA&HNGTFiS 8׫$ci TL^yq7Uq=I6M"U6GZ8pR;! )FLi= J Sgp*ǁIg"R/;Jdz@aڎyJZ ʀF2e0 5CR kfG(=G:ooZ|RY~ǼJ)/X)yc/ʾ]gdwLIJy<=ƣy@Tv =£ۮy73O򞄻B$݉XǏxӪ t|d/t+U @iB,ufC@8? '9_b2 'MUɸ*hQ@@@Z@gNû7bnZd ~Ilx<>c׶myYLUb"#ã?.=}'LMaMC[n{IT?[)h_~׶|+|15/W#xd>`%\dZCӥ;nh=pAL'2eAA8+/?5c%n;I8X/( DGzv.$ 4/iRQh(1t*.f]$R[f /f*k2j#9;>.?Ü;;0:N !_.;b#LWOyp/ȡ\'HO$ p&^e`u.t &=9C⍮ PMx-ޜ1m23F:9ey`r1wW$^7 zh"Hop=p43ywV2w4w'm}?zD\#6Lzo֫n㘎#C5Lt?Jß5~INKVS;.v,IBDOŃY6p<4%_韔mrc%4:3ek8~<VN:,cG"cN\Tġ2wN6Vd }fFy4'lLe)\T`ܦoc2SB`:U ii@8J5~}8rEsB>U7C?]Βm7N EٽQ g4y!phζ]ANAw)2\㨋|p xO8)? Z8I*"7BbLw+iF/O<NjdDk5ӛ% eӲ8/8M >'bk6O~>U4Խ'XQ06GHz:ooD79Ϋzz+Lde:`nO?>&3R/6Sy 7ꄻ-Ol'mx@CW']C ,~fQg[8.q|`TYO3>iӸ6zB5 ".~,L AS*0op{*2AppF)3&'z̙ΖO_7====vǒwn]ESU{66[7WNOȗ G從[֫ ݺL_Oosahv&@ 6:xnPI~g~B+ua |/f:P2U13ZT҂Ɍe n VcE} LSs Ƙ(`spXg'IKxq/U H3nw\N td4Q?03|N19dddddct 2v]'fix7 ܳ0Ky<.!kdxa1NEBC4&8=0D_$H(hdg(S|dtaTA+-J]*g}3JY9VD7[ڜ3)^`+zoF̅$[2W`])%y{?|Om%cEݵO^-vhzn߇`֥ FS|L}? dθ`$1&Ɵnx|%V? |HS#1;0(? c?(U̗%@6'd &,adQ"OmR&1 cK5([ Ni31CmcW6qk0 PX`#hӽw44v7P־п܁bmf_ $,ohA/');q' !TjyI1z:!ԎJ! gqߑCgV2C5O-4rL5N4Ez {`Z+gw2SLiwTQTL6ej2]qd~B9evYS2,32r==0uaƬ6*jp &Ǟ&ymKL-m+o\/e^5ǫv׋RX ǒ屰+!e4YĔ+Z, 8&{Z̸,5.C^~7K-2 JBR9u[ WrYcڮbqL󥱗#˦%&82 5o<4I@pzpX7IJxE]&6#ؽH +74L@Mr۫7vIuA5"'>偏Y)TvIgddd7l؆3ړVtox)fiE;C\LuWb} Hl4 <ˉa{t`}2LG\vT`v[$'kGFfǸ15<8x3<.hL򀺂 E]CuK zoWh]Ec۬`\k-g=%-k&Mý.3.L5=%Yy)uZoqmS/) BF,lMM4. 
zrbcEY/~Xޢ\:x)y򣟡@a𷲪-Cp1_eFv\*8hkMq\,0&ηइi k;pzix@O>Ň^FFU/oo(\Vd+qvbmC"uNy/6gy25<Ջ<vʹY7XOFܞYndֺ/:8$`# ** @O@45u xd~fehֺB(fCqe]/rZl\FvAY"(g޹ y.&p5D9j'~22DH7*r7i"o{=Hot:u[JTOz?======Fc.3h>`\$qqՋ1& [ Q(LQ3DN.JnrHe~0n1^mi~Q紤~xӲ2]i,v&e=}29p\i_{I+0NHB|v`3An'˩"oiKZX&1Ԋ%W? s @+ΈƩAukCBM;Cj,&L PGF0 0EqA( i6VnmJ}Nņ0&-]\x1gG?#/޷XO~=}#>Nyor5w>sb>,8V\Z6;_:'zxXg9|az`bLcŠ>cfX\-H^ %d*r^YgzV,uͰx6_/B=:_պ=oI -B:rc4,SʍM͝9{y_Z ;0&u,*h@ YΏ8b:& 7'2Rf:Qq<F0⠦@h>PV-#h&s³ #|v Hut@@@mB??7'ss8%fo1s榍9RsHOc sktc6h]芚jgr=ЄP7*g+z:W9j~ }qٌXsux`H~: }"qDS:0%Qք`Y*33$?;am!.OsMF95>c!d]Hp#ֱvZk;*W6'a꺙`(sq7џ}R娴09?̈*"^W T; 8'nt\OY$RYL<;}eMuWXb,fPA Ib{~WHIIsc^0 xZ3V#cw0d@\Xlwj59v az pd`WS+Q@>)*Ppck]U*s3Uɡ|!B49===:=;_< U EKA{ׄ@sS7ڞkgzD&6͜q?ѧ1mylacijWV*&ZXx7jQe؊x`kC#9dnlxkvim(j(N!u+Y9)]V^a_W[݉>HMƁv@t@0$:W(uu zKQ ޟtcїO1Xp $+MMip*:1ǒ?l|=RN?Rex1̠CS\V sBU ʨ ۶JC 7Ux` ڎ66 K RbOq03j%Mʴ0-ORf(%iه O?iҁ<׬'x PZق4ǔwqЋ6z5y r3r/FR 4CHF:)+=D x}:͍ݑ d'ǰ mf;Y&ʆI k.x(tVI-Y=EFJQ7"S.1yj2 obҫ͔h7\w`jmN˧0ˈ$ 9=====ylOcHӾN)©cW C6F'qUP*6 ``B4yRG<=ޮ آ>UXE]DkډMi޲[5OL $ #D&p^-4tyXoNaF^Fà(=y*Ҡ8,2־xgd ԧ^笮9aC<|QK;Rӊ}cjLxɾ6N 'S<3,X't*yRo45bSŽ`r?-s㓪.aFcMұ,g#nP2!CeAvLӗcDjBWT.T hܼ*mjk_0Nc}WY V7HPWl9ddddn{ޅE^LO- ~|en[&\0;2*}9޾xŶ%\Em/0mΆ͛spP=ݫ͓ ~| r2kblC~.aCy1ꩾ ]Sc2.&+W3v=`}`jvDxlx|^Agepn^vX`g`^imS/_U}BW2$1.&^׮ ɖHO4y7+l#Xe3є󛝆ՊTmA6'<6!rD6IBZ+8FpE]߇uvL/پϼնzZ /?|KW ]5SBoA U$j/9$'4g8)G=CO{b>ђ7Cڅ0 Iutu8kD̂r/?.3N4X@^e5bcaBy9 A*Z@'h; 2=:';TH3oo,AT0Gy^6) V_R{,\>oj*?q@@jMo{fF1j t y#6Diҗ$ms0Xl l?Gmɀ4_O{ȡ~lP:r(nۥ-٩5YO_{O}qxp)=_t)@lX:u#h2)l  N ʡՄ;)w7t1%؞}r'7xq6])MD#c.N 0P=UzjCH,_Tg*mp !;aP]N"zT򐗝E VU6IVI nK5|z:oɉzf\8*Fj*!mC4fp4Zt IUCw]հXF)C0LXKp maH%0yq=p( L 7&+&aDĤe-.?WpYcqSfՐtx,7 k+9>CKw1mCZMFjX&{\E%6X<渘>>B/{}9|9@J71ˠ> pb)UW%.G>!^6LJ (Kq:^i#c?cT&Eʨ4t:47UR~¦H-@-/Ɲ<.5Z/H +h&u5'J9(*xx@ Q@@@*w/j!gÜmiUlj%>鯍!1ޤiZgc6;lN$g\dv˾RN~ǶVmwni?3|kT%Ik8+n%ʍn'@@'@l+RTXq;&xKKSnXSgFNY[.X]#„M q$+&Hh*P"-h,%DeRMRsb#覅.ccW:A+`-'j*۶n6}t{mvўk5}':gdddd4af'iW m$Ԣ,.DX s(-!\#Q `7a_H"BK( {!EE)J 13I|=@Vn,r#*#Rty_DS}t\gqo ->崞Oi \ų/kЯ3>.c@)lu19^#;QԴU7/3.8ǎ7T㉫G]ʛÖlw'qOUxZKψ bAq+-7x ==w7wxT 
Ï8d̠g0E-Ygr @)iM:ag|HB~sB~G. ӹ>$XMU"+ HK<&1 k@@@{vٴXlP)8>;;M_m{lNfJZkǐ`A=M6'/K{^.cW.mɧG yN~v5`{k^O>zlD&9Na{`={}q#'2lԋ3J هŤo˶wd|߁el07Ntlj&}')a\HJP]e"CF:뤙"ة7t(Bld2O=fb,&%H &*pZtiݴɞz>vH)?l x/pz5*tyEAD@`0nOp)#TtL{*$/'< JI2vԘ_iڢ>!`[Z, FySdGH]`TWs( X]Ha ָ($gK&GmRdKSPf[wJ7,,wax-uOKv9MSa-x`x򟰢1:g2׳46:NyO{4i8>s (6TpZ %N~C _ 8 3.d8cqKraɰ Yj3cqz1Vgե ]'?MfQ ""ŒFiV0ed2_ Y-[):glvXYUB6߲[o#( ՎSLYx芜X`?e?_[Ly)D_~ / -تzmޤx-a`jn/664Aybx2ke֕gnk'n(`:1*OtRs$4 8+ZB݋a *vV} Mc~G s8:>+, T+aPJcў`vF" fX8B +:T8TsL]Gq~esZgYf쐺 ^e+.'Qmz@ m%TУDFW 9ddddd=W%ñ,#f6.HX#ŶC8 !8a/t?V Hb%h"&j8`]gJLH~N%ZʡK` T_3:BTbv8QRiȢBqXڕ-l3yE [A,Ao\eQl/MrJpd)rX{h=Xzu s 7zT[^!rSW:y J L ,:V RcaJqI WF#r2[1Mdd5Dér:{ { {`z?=x-%WpP25Eq~-\æq#r&-`)SƼ`uqwmQ1F<<h~}-[˯蟚?; _Ou]oq=rZf8wk-q2%XCkY&3؀ :X^Y cD:t1>qPtLu /,˅ɝ@bƂ!LE$, +. 0<}E~'n\AH(. -*0?1C@@@@@@ ߈&z<ƖKRCTD,"#4N/ٗY704xtfOyX)^5=B gNBڡa :jB )MrǛrj:w KTK\\&q*c rN:5[?7s<`[-zYlyn jues}&3o6ׂ9חg*w8K_dMx=^˟xq2^ek:[[ow!:kpZ؄)Dڝl8NNuI,z6?>$8e_2/l(M,9h6oƛX uF?7$Vn$yϷ^s ~B Lӡ-5֞hTlE<8, v[<X~ϫ&{^:!kʼk~jgۻƪ:4-v5kZ3K<p%wi:}`vNYU~z٦}6)x6^dJdZdӿ c/4~`4M&4!6B=Ip.eBSnܸ@bE~N-je,M.T!M;,ln`ͧ7Khk}i#<¾84q %X0,D, ƺu3z\lQ;<JKt f{>5z{.axzRhViHgμ1&9د9j4feL0 S^`7vC,N8!rK>jg|RҺX#V&?7u fp{*dzK2*Sbs>::]l\ad/,O\:ҼW˟:uFN~2_Ȥ,1N_+iqcEÔobyR_;YQ>Uu+ay5~˓FCGi'!)=&e@h0H?'¼߄0^|,l2jKWL@@@*wh*X)U/q vQb JJvc).<\M]eX~+Jz` ,!{jzFux-( ~MpzT~>މib}ஸ_MMMWlgp N`(uaKЌ vUshO?EИ*݄БJۜL, a, bq!Y+Rr&+\ &kO5ʦnhZ~Z4 \ x EJJrK|RJ0h":jlׅyoP3' mPt|QND&mڇ<'G\JG|ɘPܳmħK@QWYdIZI.yO[Cq.ӌ-r 3Y7)No4` :,Wax!+cmr`E 㜀ZS46 _з<}!1kmLwݺJq;6'*ծs'x-omT`x":0(_^ͯ֫ʸĮu+/;֎Ksx' ];cGuxxmEn>2{N:<٭!tā8aat$ uwb5zcIC+tJKj~B JU$N"aVf2RuBAA&KMp-4*xFNoa e1u^/!Ԓ2,hiӹ K c,f7Y7;L0kS8{|7`"8&> S^Ȱ| w{tjζ$b1E^oyc@dRv͇ш|?A7NX&\IǛDOw#<,Zn9':IE!J3԰Li}@fߗ -ue'I"Qog7P ;d7i <n8}MpTHԓk;WMs-0Bxʅǒ9#,{<'Qo4` R/,OazX?lm> zٝ ˎ߲ܳtRFWW ??QNX{Hq*&C*UPǓ 2^eyAإKȩTNLxEIdG/LےɪG~s6<Ңܰtu~,&DY 7!n͏X `D`D`D`D`D`y G_BZa[j ӏo^UK`6|a7 $@06e0(-&r 'N$sc8ӨOO UTf=7۷i!yM}ԙ8nYv  ;\|%x s_sAo꺀Xō+e"ʦ֞y\1gs&u,kjy 3 i>48{'a/r~|1I[!cb'DU 1Ę x._(M~OϏƔk&&,s`LdKX5*OKco7@Oyoi,LH*ud}3}g-/ S,x 
SXt5.S(,`^[t]敉[KW מE~]O>йķ7eoīCǖʙEq |j}+KI_`ǃ 7}pG'߂0? pdyk/\*ˇ/Hxp}C{7$VuQcm@Ѡ$3+-;p#VyQcs47>"%PH$qa.J1$O'!/"ɦ:K_U5TMW3!ҤFP=ATl:5q[8MX)U 8d pdϙ|D`D`D`D`D`Fb ҖbcP^3$Gݩ j諸Yf\|&xfii:H/ SxQ{3H$NrSSSX/sSXb!w`a=Hm[6O28#qu 8_@IDATȵ1uoj- `HytnbM/X1M@Iz.{hGMZxE )7Lry4M:ߊ=CtP}F "bdq. ?3eOj2; x >_Ӹ`DgǓ5}Y'O~'7|ye_{X^Zo"o_3>}vvī`Hhw4 ~"/@ħ` [ /8 { 'h$!qM&Q!e("dC_woR<.]5Fb}\f8AT¡,HM)Jⴋ\u!+R~H܍ 87^`{DFƑt]Ё\h6 t#r 45dҪ@SIu&4.$}/vj%qD2 Q(nV5 (6c)soc,#c{M6<q/>ztv&CiIݤY5 L\8kW'W;o ҹSkDln2FhL9h[%f<;ތF^sqIcx &=mf>@x»]ś36PQ*[=b;<džGqϗ ~ !v$xO6M7 ;U_-bҔ?>eMjXDb.y(CcjVFsPg$B0x myHQk1G2QiD`D`D`DpEرw~TjڟOS̕p{Я"pgg,+{>||kV57+˼.I?a~~=9Z([C:h /_,/۸PjS~6d88W Ya_ՙ󝺸5q)ded~IDy$ҥnN  pe=SyotOa ⡭'3ȃcW4Cd,|C/?W1҈MD7BgخוkreH^&0NIfϢ3Of🨉8L9}Vfh-l)eUu#tkOΩEtVr/&cƴfWudqnSjkNdC7LeoX?9rȘM[]N^ y~C]|;xx+^1o&@Гguϕe#+b{%}#t> .{PKev]k}Yprn~ٗ쀤 = ‘RErU5|(fХQ>{a W%-8o7Ku5nO2.47,6we///F.կywO|eb(\Õ;W+> C,o,*> t|ׇ_*xax7L@nop_W7.ćsz7܋p?Z^G<ǏHXѩX'Dǵ`>ca+2>EKKA=NDD GIN>+)nW/[ɠ!I| ;AS(POz'Ұ ؏TNyt/9Cfyȏ7L$>s}o9Mr8r[?XK-eaed{/: ̱xRvSγV G*R ^!$#I&ca*ӯAxbE>me3 W>S xa騴&6- Osc,Eg###.'N+|+~|>27ZCvH&&O?/_ٟ'e;x.ڝF0 -SmǧxC?j=x5\V|}@e7o^ASB,cDI&N0&>mUW?܏tZ㝈7{k<pɛde7e3.򰓂r.yW@Č}/&U28-േ+f\I0 2 [o#####7{s-^tuAE#4)5`Wz2'!^䪢@D*:BiLf>-5st `r*싹Uj'%=09Ric֕Lk]sϱiuLԟ'^E״"5el5.S6> 6i\>;sx*d,@ mV˫1v$ŦkxXch*m`0:pdeI},recNţMc;ec 'F5kcNb_Nqiҥe36r&Lyl//>KE"0NoiD DS w|c'˱#x }^ūP\o FtH6P,kit~`Vs?e (y qxpA&bΰbp2z|4(i7:d\%{_?@䥠J>OX~QY(tkx*_̄ l+iI=zk߱L#k#[p}e'vEn CгO,&2SuZ廑ES1Ǩn6fEvCR @b<*? 
uf?_|HR:2$4{_S֡sSKkI.7?'4]'5R/r\)YVٜrX|pXZ,_Z>^>\V"4oY]p3ocY1N"q|[[snlXEzKX#['^.]Ga̧:Ͻm~Qϛ a$ӈaaXek:&qC⭽ZxryG>XVYj6"1%ScFj- ExRlإ 4j> Yn>n@I$6_e'pYc GFFeʟ?~vO&^ _ e-|zaV~̦~_(is֜,Diq|F8:V~g7=ѭcgRC/J|+ T;tw-,w69+1N ̎l"~ff+||G?XVV< 2 I^p@%I-iXjI-G&@h#Aq6Ku_)gʻM^1!|qlJC6+Fc7z8{(1/u6CNJ<$Q*}=TRɇL6A#9ľl a )ugyL#y6OaҦ)Mٯm ƙ}J1Ϟ/cac=X(ulT&Hją\$D#l /-8a٬_8^>}mI{eg[ d|spl)LYvTj#n~!k.7Ggu3^O_!$m_!Wi^[/{ ]ý [e#:wIG5Ag|?> ^X-em˃VW~tO?=^z  S s+p' !GN(0s49hK=ʫ>\NRD-3ZDg"7S7u2x2҈-F7> klTb^uUanۘ᡾O(K^L= }R2 ">ā}.n^&Kysϫ4LGv;tvV&9i/ytb#͆2-7-dLU+!ɗ=Gp󘘼T.[KJ΍쵅r~}6|]z|̍kyNϛBE|>*sarxvTžBo_Z:R/2H9=gOw_N>ASf3f>Q~%϶Gیfئ'uMH=`mX [Q!`mOTB[[OfÖ,؈r0a&ooJs/^8_ΟP\V]].X(Sn{_ߥQ.o/^]+_m_,[ɝɣ,z'|u__O_7Zmm^Za~B E:ߚ~K#pD`Fկ{X - K>?]qWmeF<:g^ZcdNUqB"'y4|VgȻRru^}L|dyen |ŨݣlNFp>ϽJ/P99G\͓rcA;Y>ߺӾU{Q^Ө j^M|Ґ.0dxL YHu%@X(ƤGŀFFFFFn)'``<][}4RW:tΫxX,˳z{JK~D'ӬՀ!2.:=P+RʚFO['=s& j~ry.\/f˸ƛҍw[wc]K:aD55)eM|R*LdxgB,lݯGcxIʣKG8Kpvz6lFg3q4 'ߋ奃Dž OcNe8CTE}Ru z <lM 7h1/* "Yy0Ûɴ;?U=KJc84r3dst?؞򙧾6~C赜N3Qa!nF~8VE P<UvS~04&Dq1M#aJv'N9Bc?"0"0"pwFܹ3n)bs kϝ=^XЍ;Oݙhroz`73,܃Ou}oJ7쟶i|.nF`,Ε?S3}NY ~a|SgWw3N<^ZSx=9JnRtAmVy!fNiLzd$F>"0"0"0"0"p8~GqӺOZKųN}jh?IJ\D|{@JD,, X<+.:)^krO6KH,87"М7syIv N{ӕĸE .01[FnlV*8lEuAsѩl?jٚ\6*GOIT:{ډriufqxWcT|ntizړnYk1m^ChW q|φ>zÜ!{6o9πfXC5F^:5A}p[ `ku<>J`g1xLK/eHôڜ=Lp:Fl[VY.CN10@~/.ުB9Ƚt-~ocvڈ(|7 6._)? T鉓ƨQcHgWyaPI$S2BdyIy(MHƒ∡!$cﲕf0Lr)/a\¨OIP$Ӛ8xB淟-}9okH9'qGϞFn/7|ٜȳ6?µzwMXM@suGU7y6r:xy9,WR[MKvܭ5o4@M;`ctAs"aڌQi<$S奌s OU8  *ۑwc|ULhSe+ꠉΉ%XTAp:}_W呏Bp1_ku/{(0^GvLN_m* fyOaĕyB p7T*lX 5, u}Ie_J6RC?eEԝ jIBHy<xGŏ]ݞ_.K_*sŌr+kK峗]v{kt #Z^7M"-FK9֑0`/͠ ֶĥՀOzRI*ξv۔&Mr x&?1* ~Actuç3槯5W?Ų ]UooS3H' 9 _ШY)cYHh_@d,lI2%5_e?pNaCKUXsxɂL=mm1nЈV>;{G8,wiw߃twikx|Rޏ/S#vҧ'g?' 6Cx1r^j<~q47߸xl^FӞD'9 k`Շ_1'E'b꤄ALA̐SGPEbK{HiPg&>(Q8yrC$NJWm9}x8ܱ&Kq+gav 5_}-'\U@3֣'"ԑ9ʞ%ZH+QnW<ɲ,K^=vi yFN-w9Rnm̔VPX#l3f"GY V3Q3T 3-5>jY? 
^,X||b)ϯR~"hrE$;_&xl74knhd>lȩͤv&ceb_8oZ)u)X7$z N[ nx5c_/4O-Vk>GP3G'Ynq%*d,$ڡoRB=QdjvUOv)HqTl`DȐAq`,L}N jY }%ڒt1f`G`t}s <:ýEk.~#uQ_-s ߻?¶wVr7c$^ {F.@^Shyj +Zu`:vZK!r?OHx1d?pt!M乙sìPA&7HSűĔ]F`NPXblkJE8lI2ǝ>3n|D`D`D`D`DEO@TW.m]ߨY,*ɵ2HUG:YqJY5)e1(ZG4e̻4qh,JhHQ/uIRft [Oɕr2a}Bɮ$-Q-BJB[w_,ycǪ\T_7R6^@{C퍹˯](6VZ=&ۉm0]ojev<F5F|/m4?iz2%OSαpN̍w7DxF6"0"0"0"0"p#=;0}3x`af4xV2/g˻Y8A/\>ˉ9:;Cq+Ƿ&4``\'EP&,7WQN >P@<)N7b⢘2B_De= sw2'|,}BvZώNF1SƟk1g sb'u H######o G.ѩN>늝ȣ+R"."C>U%Py 4ef^:K|.x"/r D0?37] Wk)y,GۂVER!PgMD+I mSp!( j45{l-\H7CX>+em{sʁ(e\r soaZwc RE|<;ĩ̅~&x_8,uU=eLI,Ƀ'F`m[kZ K_~#α0cěn6rAu,:=!h,HwVHND%̢2Nꇯ 3wm X[XjK*S7SgOlRrp䒡8ZEt#wpƀT7+ c7"0"0"0"0"pG# /kD_L?O% G1-!'6ɓߢfT|*2&xҞ[8dȇcr8}Ogד2+??W]^Ӹg䥬 ڍİͼe85S-'nWud9E\g^ۜCH1ҕ%;-Me'rl9~긴&>x,_k[<97JrQ9:3a4Sb@mQÁQo_))NS[B'LL¬"۪QR8 q".D2:TCE"S8v#####w87bpշ8 =t5TQ/G` ע?`NM=|uxB.t%n;mɆn|e_\W-LM跍 < b X AfaUfX:qd9P2߲le3Lw,t3)K9NM^bCـ|fX6/sjwj{PO^i t Vc<݈{#,Gvx iQ|[;Qל)~:Օ=~ėkpg+k=th-&;vyO B{VnNޠy%&q&ӃgPiԦ'i36)@ӴxX?cHZ8N&3kuH h8'I(5oxl-^Mt,(PR䫫ey~;ZuvsZq8%sG1*x'eL3/WOܜ] 6BqH eѧlnw9P#37'gJ2E!5o];`rOz|<=\Uara%A#{U+J(.vEX6gX RAgX}dSn  .vž\n'>i,gjz*dUElCy2fHyMfh*& N@ҙ#e+e ,Pvuhċ+e.0x1-1dy\r\N8M?S>9 "x} jg@FI< ɟ@+qn, cn,L#s fzI s҆KZtihxۙglKETɔ8] oCґ:Il.E vO؎<mP6D9zt,-~Gc7"0"0"0"0"0"p"H(\9puD D7?p0> ?6PKk[|~N7`@k T{;M3չ Xs`N|kͫ?嘧,sMذ%bVS&~YDLA 9'v*"W*O yG/=}搵cA5?Y]}GqP[9|i0݈OZy1ϔ|zXd$CU3ÀTyNJ/DNqg=I )LKDXJNA'H'yQ*{OIO\It3U#b֟vgU HYރwJ|S}ԚG%qΡC֕{}CNGYu[/ `)}tXˋyԑC] ?g@w/,+j дǻO+*V+'07HC ry7oc-nSoG~8Y| CAkz:?GS ,ag&o$Xe :D*}j -Gҡpe\PհNE_*î=b:1t~%,^sDCȼ!KMIg&Pgnca;qyL +c7"0"0"0"0""H,z*6NN.)ɓ;s}FFFFFFz.ԾZEֲ_P}}n%%`FR\o sF[,;@O[BˍNY/S)fuh1?Qtwfnnȃe}B>mm lx2M Ok+Taw d٬,Kv*/<Ω eV+Qq蔾CìBw-` Vy-oo! 
$UӘM|O t mɸޡ;,G:``݇F"䩅};S'y1Zķ񗯔_A]#:hbnQboA6'3(' 3Qv3"PPxd*&M,n'(+t%|Bj=#>,S ewڛHƤ"3v#####o~=`gBKL>Gxh *?l[u4$˓;o:[c|T+oO"ia2 Q#DiKZP_Љv8vo8_6-.,O>P>qlKX?m'qE(oc $ڙm]l83 yDޟR>F?7,|?{z ndBzB/`N`z@IDAT7TT[׮AS ZV dQxx8| > WۡuAL8Jqr(oEHD]S1#YLc,0݈7s,>H#wYx|򾳻tu}O_,?WW1˚:sOs{"MEzsͺ6&o FtNH,u'̞{rѳ rMܠe*!6(>8~Q~'ҕNJ)@Tn~ 3Hc##179K9/x嵻]kV׾Ee|f}L@Ry3'}dB%~%Ix_ICLG S}ErJ LFs 3S.Yj<`՚)4Bv|Ҁ"WcK{˂{ZOpT*R4pwD) keċek p fW.-{n|x?Z.>&X}Ƈ8F(>gs>~LSE)?pt3^@!O{Ie1Y?o^}nԣtФ[ G <1q\^1N?/a=ZHop,7iFiJpnߏ+L `Ҫ\gi-t$RKdҎF&!0 h3&ÔuΧA^҂dI3ÖVr)c7"0"0"0"0"0"pg#=;İ~`>tpG[#pviwSػOsK|ԥg>JO_.vÔ0Zan._辔oʹP`ގ{=ݴͅjg]S6=Ht%.K GNE1l1c>cƱ!=uR/K$n$uST).Z} a|KiQ6$O؊N(* gا u(&9ZW:ЁG{*ZX6ߢDKnNj!%RwYZ"ʵgg/nqq<=R.DZc4Mr6of fDL7G('6z“[vm-Iƒmϱ(#Kڱ #zLm~g4Z[)sX}6N͌9<% ;%Derqb:ݠ3bt`NJ>8ج[T+&+.[b~ș^@Ig؝7d(F>"0"0"0"0"pG#H<S#-?ϖ1 lG`埿\>g_\^Rs_kzy=;vXyGnΦ-pПCj5Optn%b2!2tj#IBA?`xHz)-]M.a֋y&x)fzIJ>Y>}ԚBځ\ @i9+o@ƛRۓ&Fڷ.0O]CWeKdD͎62N[hz^>9&mH&l+ayy0K^# b!p$_h=CjL/`LS@RT/O$å6Nf~&ʫp[΍O0J#t [O`iD`DEmO,߅7 1ࠤCƷ"ޏo;s7h9]uNyxN͕4XRi7th#SNrxs OS 2i9d^NRHz|֋|>Ȁͻvӆc?]8"0"0"0"0"VFO|P9:DgaNBҢܣ5w8DgD䛖u湑+W5A˻S4g)1uf}_.r b]&LɅa80T>$S$yxʐ::ZNILfxW.]\,{Hy,^'qx;ϸ2vQ6`r.O?ԐF׮sxuMY{Xu;-`/wCȝo8-9VD'{X-=5zW]Lq#{:2' 44& Z^I{{uu7)9+r@OsM=]bkң R'[єIHş^u']i0G^*$gkl\&,wӉs3o8{xa FFFFFnGخM#oԋn%vRJ0s"2UW.=Hb( w7аN,څ.K4j2l\ru4-D߈KZҳ?8* NW\Ϭ ^8e H#<t}`YVY9RrLAlks,Q7gVʫr֑Ǯl~>瘁H.|?ѺvD-~7Tn)JLΓƀGLJ Ĕe"Wt zQ-R6n(xCg|kN N2"^ 4KGΗ_H1$W{39jc?"0"0"0"0""0n&۽-oR49X~gVe^im_)3ˋf2z=J>Շpj>ĉ Oztq<x!8`?dR]Ó{Vm5}`=^k5'0vDDF 6IYNiz@08f!}NDWD/3UتV &59)!Բd^Ƀ@yS'5$nU<~NS9ox3IaGFFFFnkΔMf'\#VŃ~|-CY_O.#f_6Ȝ}YE@'f}K_~r_6:ocﺌ{Z*;ェyVfl0  2L$!E0 1VfV $+^$d`!ԃf-[]U5We߷9UWys>sϽݯ1D o6pv>b" /Hk4a3>p^6k lb9~n?rƄD&>qk#3˜|U9q7ۺ\;LNٷI8v?}PoS `82[~b.f0)VlSk"qUuE\oPE+h/+{t܎n4l Bs ť2{r IaccmO ڹ>8Kf 3 d2"Af@&,em3ݏ.eGJN|HsGzP8 'ۘzX#u>8ᖈ |r,:WӶX@ gh:,` 2z. 
{j ,dG;jRtc6R5SQLb؃1W"V80ZsLZn2@f 3p3m `eX)}.0FRl`yE#1I[ Uؐ6};&xD4K/۰+e9U Jƣj(hА`(G-mj[ac yg;+?\s:m< 8?]x ÿl*AEhcGC|{ xz+>sr|SG˂_r~1xY9}g6r[/` H>^VOz>=xe#E>5^?N8>(phXCw-۷1ocLk k\xKv>Bԃi~Ty–.*?am؆8(zfoMhG^ fcV4ic"= G=,y.; d獳/"Ol.}Y)&E Xj%DK=s\?C]e3enϙn6o F͑Wfϭç0:W{q`(ϣXF#EjF68X8Dq\cѰO߲_R>iK>Emkl)q聍D<z A;qJgUK/+v<HK/no!b!60<6 K-.Q&0匛TBM05pp^c7C/> ԆM?";~bυWnFf 3 dy0c̲2z`:wۭ9wݷ|û[&?IϗyH/)qub;;rx\^.lR2 t\Dsyq~Þ3 \/? 0 騟j:a$4XP Ljc 3x8H fQ@^CUȆ^w8t!6  k ?Jիng/0H<д=(99M:~Έ*܉!Z<°<^ 〲6܇|=⩶NO%΅M8K?y:3 d2>y+pOԮ$ l <{syϛ7߱L6JO|HfJ& Uqv;gڣĄm{>BOkY;n/0D4Ƃts87|˿X@{sA}Ƣ^>Vb1G<\(pI,d3+lЀ/vM6]ۇY2@f 36'z7r^Ѕ4e,l Xk BaU\ Of8L8 eTpb#3QiHGym̭գnBR*nvhI#NrU@*nܖ2#`Q tn \W}G>Gy;+.Ϭݳea/L%@*;9]>UYsž8^M5Fi8} TZ˜G;pZȭ 8]*ǑW8c?!l6}&`Õz.l{8sWO= CYadlJh/{#X-F`!-{FܐS+|-,.3~FN)P1;n?"OSmf. d25΀l 2:Wf>m+ylOo->3; /Qy?AD~c;Gڱ_v~'\a0ssI5xAE[Bjq@ 0`]w BgNyGV( 9ZCF!*=sX n1:V5 d2{Mu>PdtA4b`}cfdoyQ\ vU;ǠZcj"yv.3h>ίeOxh?j .3?1i`’ #M￯Lm'b&;VюRCJ2g7}|2yoTg}ǿqwp`!{ڨ 3 B8΃BO&e=-Uνv]+&ᇋ h12`$@f 3 \ `ef`M6XsX+[g?{w3?2?|b#^8S^o@.j˕ﱍC-%s&'& ysYדX.PZmF\KMqFfݚsqvq>|, <:"MoCoxsfO¨.0 eЍ S d2.oG>9?{hb66tJ&`$lx`ksC̓| >V D2 s~ VBD:^lï$cQ!jH@%ɺ@J hոß"¶6CI X/Zg#\{#kПZ>ARCy [ձ(8ȥ3t1 MŽ2ol`aNSg M_.'yrti XBǸ8~Le'}\7if vc&b}`yC)LM7u#pd/oڼv]f Џ>/{hiƍ)k\oJaþj+7N]c0Z7 td3 Ipd5Mmp_nO υII=\ uքkG\ áCFyHF d2ky^@>7o |[ʏ?|{脲d? 
[/.?;GϯqJ4qwLZ޼; 6Pkʎz"j-B)=zsla kr4e}\XH@ ~ ?p DP&Q[̇*n#e5GiSjb<;ap#ٿ?zvR d2"MT$cw$LNThrqUj\G$(xO_ eh9 9g=fF xyćg-N\p'juctc>@oz^Oڹp}yAf̓}?uE~r G,Jh qAxڃ[",0QjBl |:Z;c>x@Z7Q"2@f 3pU20e#sBLG]rذ]!N͘]85]PT >9L+js:B'mѐTK[  :~YL"LDN&jF}+Ph>i(iuQG `((iAhp1 \G`_4 *\ <q/ěv_qcVl81A1&Y`Kۗʼ= `i}ӿ?sᎎ.O~_.v]f3ȡRwG~#v+چ8#Q]=pz\96.9݈ <8hMήr56 ~;F͢}6V}&n>CR2{?Lo"Wo*8ef2@ffC6 >4G19mMB D0.yu(;hJYAr܅SLrԨzkb:Ո\ₐ_h9.3%e7aN'\0X#[{$.)hrUj":T!kǢM' 10ێu@ٖX h"ѿioK;r'ïX/8ҍ"3 d2+lorm"b21#p*h4s̑PLrE7a6 j.@S0!TֶmRHo[ >s۰T5fUaפ]U>#/Eh6&qא[VLċAa6>V5> 1-;>I :~<΅țṫr d2up2+[)>R.73pCf֙o]eS_{d:V>z~e,;=TvyiF 8yhs!~,?'ATTkAmd?hh?BGh3Ǝ#|BDc ¬c#6:}!4 ZW5FR d2v!lMb?>G< M ܨO/۽1L 0aPFk̈ța&'@F6C>WA#[=Y׬zq[ %@6fG0"f: [ []ܫ;-xT_ mAMhi!3ojHkP5 P: a"\?߅fEʏ|O4c<⿘nb 61ݙswC6k8u&"il ?Gkc5``r6pPh3L>Ǯ=Fۻ{>"CQBx8xsʢ k+okR>r4ZezXoGϖӸnv+1mbچ]\m\X ~]I.v l8hی7mI `(@g' EV/xs}Nŏ<>~Ma/іAa䁁V>}UV.0Z!(7`MG$9q.|`/hd2@f 3N27q58`O~52>n 4U뾝{]4e;AݞQ@IDATWV~~`oD.ee/ب?Es'~b.PVOJ!hkXK[Ju,X|@Eبm +Q|BZmPdhz4۶aJ`i@r39왳ex?=.+oi d2+MK E !$֭VXkfm8wԐiy KuXC2pPIu׆i/ao#m0vX6l#7GBjdק_6&JVFnÃyfЕСrQ_BDȿw:nJ`:JU78]AH1UEQȎ G%c[&d<2(v+e楲#eT)|+rBwΘ5&)5Q\a[ W)' FGvebUsl*/ O l*? xrlw Xs8$*>#⸀lRY_d1c'2I9)7,$5uę lhn=V fO3`BZ2WA^e 5tm4GKa;>0V:j9Ik N K\8@un綨8ݚ:kG50O9t 0(cŋ0-Y`$[cjzļbKNs{CƊ?Orթӥ zU]aZe2@f 3pE2+B/&2Ma{77 g={YVD>zw^vݵ&z8qH ÷pr%cђX/`4@_(Cq?Z)tVw@x{&FjGn&a/=g*3 d2uIo1 \Z嫵c\JkJ>ǡIB9'm( Nշ{ ҁa5d6=rV4iHMas%.X`lujN+|.'-ŝH]\{4#ٯ٨".4{L̓xq!N[潭,m[)OpkO~OQc5]ڹEx_/T_x^d 8y nʚm] ϕ?.q9P渻ޠ?=G־'@ ]k!xtd ]@[>8? c& |~O ^`1QZ5#W%)?~R\v|e-M2@f 3p27\ԮOiݾ>NF3pǶG|׎eOF?8r#?wv#Vٽ <_ ĉ gX|:cYq..p|=j"|t&E߰ ^>9/YC "`7 2˿  Oa) E$~@(ç2`vӰrD<<,@f 3X7+fW\@̔1 znSfNҾ֭[{Ӫy8\gNw?ufw  /wuvh̻hd(`Q lL],!WX&k324ID$0^vos8*bT нC&旑ͮ@Mꇝ" /=Q"@ li6!WDDq.͖ɘ;76kD|MQǟxÐf#m\/݇뾡LݗO=J{]7ϾϠ0Vt@a"OX^EۊA<l)n `Fu轆|Gp. 6Nѹ[>;Ւ9yK4Ãk-0A:6jv?d|al}DGߨ`pLB.D!ʇ!h =~W#Sǹ08'x d2fYn `~wnȒ \ x烻Uvo^g>t~8Żnoƽ@ 1by#R`fC m[7Z`/Sm߹vڨyFX^&Y>ldžJzkcǑFυBRTJ}W멟>$5 #>رf,Z\? 
͛>*k62@f 3vX Ե)=_ 첟Up.2AS7-_~V^(g~2ϓN͋;o( iHe Fv=Qœ.o0 GrXEᆪ|792&3 d2u):2n@9ۜ&J$df o32!-9PuWyd⒳@N:h]X ZBHXކ^-ZY7ِG1-羝TF0(j2.V.vNmЅɆIv= a 5Ћ; `_ .qw-h@f 3 dt&L_/88wf}qx63U]n]K+?{s7^ Fy[}JN/P'y]H0X<J؄=dXl׸01cqU.qqGf^;Ih~ɲ8;KK|;q_]3@f 3|M k?Q3@⬲h\% )wW7p8LMVl S*+= D2>x.| @1u2·7mTa7g d2@f`e oXFtK>;e&e`}ӡmGSީ8q̳>[~ўx.-ZxOYXz⏇xE-4@ Dqq]0ІpuiK ړ}:WT"FأİGXXv2K`l d2:C+9 / YJ>Et-ۤ\X2, pZ)$̡UۃuaKvF;F9fHQC>W }`14]:PĨGV*S$(ul9إ\i u_AW%aܑJE|؃ =^ 3/oh2~j+3ovsʹ?X9wm\}3z8fr->޹*'ƨ pݬ No5Σ[Ǣ o,^!q#A .ξܰRυK+#u hPs} Cnҗ)8WJw v+++z~ȞOۓA >ܱeo~9vS@f 3 n\݀pƾ0vu32[͔|-w(['LKg/؍8? yZ޺}kVSoN/e Kƃ4`NxڲŦ" M[' /xs[C3Q @}ԡfu1DK$Ӑtv# #8O;/.o|o@f 3 dIlpCks5mf40Aj6m^73 &Hg6&b* .΃:,)&4P7[lV;?:oCkN!i؃$DŽ24*&Ce=UFU…,hn݈űDD͡x P~A:kswrj[͸GGYu&ɀŋFɅDïUkJ?3xdG9Gˑ] ZNJDvd\mr+|V ]9 NF^(0%V=-J t6>!݀ # + xC8zC uwҞ{<c7ENķrR;Ϩ>C^Lk@bbldQX(?78 2_ XZ!7l A-!yGI`&'u>@B?ɟiq &G޾[ $p6KJ4V-_}[$/ ,߁!d d2@fz&8":G塌zqwjqg󀷳 ]l.ا0 m@椵J71Gc/lQ۫;Ws>QK-$ڃM ╭AMWcqG|]N:j(BPaNn{б}M4YgWFGo[x3$ځ>qxwر$D*W^wUT;npr,l6 HEKv,:hTxa .]= %Lhn'{Z(\أOw H| ; Q|hǴ۟sa0ZnպY2@f 3X_|Ui}Ř\ l x+¤ g`wݿބt$n'^+c_~~OQ7/NWk4#vhq 0X ǿ.dA=\&@C,hkq|8aA\T{2v[Ԉ@ i%nJxGkqKÏ;ˮ;ȕ@f 3 d lyyjiFmhpc \_H2]Syt2gυ'MqzCq%KMfC0 t{+:(G [I|SB ,a~W\F36hxX`zQGn.uM @JJuŀ!Yj&PSgH}B0o7oNMܶggʩW|?y o ?RZQ`uFGcVvo-`A%\D yff߁i֖<oJ˖q&L3{8ڟ@3 = L}1ùڂaD-t}*wy[y׻vQe2@f 3pdই-HZ5 zxoNGsN?aFӱ׷ C;RaoHx=B57{7眢 \[&kHzSO2B4vL7I:ڎhA=0w#)qxF[ۑ}qZ>#ءULAɁEPHU]3/myJfNiH p?|u nam_Ň Jcp i|7q;"η{??''ߗh`؀kٹrjo>ċ3BϹQB ?7,\ǛsQmE rlsf9S0j "Nj(.XLر킸Tf2@f 3p33kHFp3ozVg> 3n{vo.o_lH5;1W+gNsK8ۻ1xvnxþddM}+<؃S d2:@bQHʵN0nd2/mmOs{yo`Y\wYNΕ_WOE}eqS.zA8gA-6aDž-PI]ddLl7 O\2Hmm14$ѬOb{( {dP /|6LWm~sϡU1 d2@f:1s'0*&\@tڌ:&tbݰ9&3gmF \4ɂ f_}\tYp*|Sg>_Wʷ}S,@f 3 y\"c@f`e`GYh)xtOwl/3~Ϟ_*S'˧ϵGݭEtza@9wejʧD6{};FeN, p-ٗy  dA|3џc`5C_9nJ=շb%;~U˞Yg2@f 320etFB_FՋ~y u F(+A&5 ΘɅEn<0tU*i&6~pM6aٰ[!6Usگj 8>.pƈ`5Y-V1@=ت ۑ@;"@Ey l'a7Бgeˊ_xXY԰Ԏrn8n^'}@>AT Qv#P3Λ ċ /huv4?|幗'ک[K ¢+$E{E&Ɓꢸ)Bܾ S s|u\6 a`xbA~=|\8`Ks/;lf2@f 337=t ;*!@f2nˌ4~աmv#;~~|^٧O3K\"6L#|I;$'Vʩ ;BNYMCz-^@oX-,V|AqCUemSR,L0`:3aO =~̞{o+o"U@f 3 \|AF_md"یu|"0 ;#̃xD*ơQ4Zkmb>ٹ2LCI>. 
.zU>E48y2 #Z4PO;`բS-jW杜O{>*6*rSe#-"F(=yUP +Z&.dރQWۦMeֺKs]ݺ2rn-O)x {= ЪbA1;l ԕc>xGͨ\S/?x0D+ރeif7?Ki$^}%M ]T|W;O oW3ox QDsGkt9gGȚd2@f 3~30~Ȯb-cx@f`}e?nM_m? prGg:Z~ճ W޿T'؊mPrwf_.z +( |37?i!<@&Ux%Q`?`@tgK֤4KP ޞ &``\x[oܱs#ܙgq; OޜO@Z ZX%ro2 ^LeZm6+k_#/V?RzlM0W:U q,ThG! ˡt#1yсeFUU֝ɢjkk ުВ/놊ڋV5ynjqy.5-`|v5KX$lYrP7x =1*3VKi.ZBض98Z*#} 0k;S[t},fħ},3&_<#[v-~jKy_N?x@~闡 -B7[qmP=i),:rx&J{{?wvWbAozS|hwX4yg h~>{<-L3ykG|oAh5υiఁabTtk}HRc2<;˟+x:_xſcv~"'Kf 3 d2]n]p^v@f`}e`^\ [g[߸|͝;|h^<5_~ïSR*^:V=SD'6x7Or-޳GqB[̝^ZRBвgzCX\`M9PQ|Dȿ۳iz.@Lx2}KSl0`Gn>0IyE*s d2;[4b&&Pm+K(60np:KTk@A9.ZϪ2j`%lilVqAvoV!~pxhFvlZF"HȆc6 rQXD U/l]zzŋQۥ89 j5g$ڛVfC&/#sϔso8Yf+t\'djnr'K0vs;=7Q8E5uGvYO^[\)?+fg~ko-r뎵恓?y\Y@![(?/.u(F ;rv2{,`1 H^PCB-`]-H>a@: z>@+rOPBoMMd[/?,_d Nf 3 d27H6cg9F:\#sR\dVZ ;΅1Ŭs 4sF[eQC+Zaߤ%?}?V#V?13v5/w>}d1E鈠nNR4yOj;P":FS#Z{r~Nࢷ8%Y=+xUd&=18gdkGﶅre~Ϝv"'`Zt9owl/ ec.ls%waՏBѰ/]ȭ[Њ=Shaky!3u?4TD}}ZKoX\2m8!/nkxwIW iӦw|w^c#ff 3 d201]eH:moB5t2p˶v +n= O.hO ח .œ3Y1۸y&'nk} D<}X7_*(B.4`!VR7?H;zcrZ y[Tj ;DM&vFhz;0a%ZIj26LÜ 2oٖޱ<6"B TwRpFfyGYm) +,x-DУ2ku(W N7e#< @$S-9Av0d".7yD{|/r'e>G[fFެO ?Hű;<5m[4zUi:q.l+ޯml 7j7<΅; `;ʯ*o Q֙@f 3 \w5B?`Oȃjd:}d.>+L0+m+on޾@ʙ9QNvSz/?[)yOryR8k$W8=~۽nS}W$i !2u<_D^ME-^ɱT>& A݊@t:(EFAsϳ{?O/;vlH֙@f 3 ܨ9`OM!4GjLh 6A/#GOOht 5D9#f` L\lnǞrΞw#E f9LG~NR>=aVwl¸ןlzJsIWfZ Uo2~t A!B9ٗMo}˛ly+S62@f 33pqW1fG&זp6%3X/53]N-.mg;&\ya.`N#Gϕo<|;z|6,fHǹ.NU0h:Ul+*497ti~!y)`E XƢ3x8٦arEǟKZ_ƃTUO>z 7O־xKٵ+YΒ L@IDAT d2̀gOXz:X0=+AI 0:\ihZG#&tGfhJs3A@4#, -y\B^v$=,}P`E>Ջ0{\ #xVtHD4DtNfFagF+u@N4>aBazmj[搄ڌF.nsG5&Y?Wpyי*)2|zMe[R]ĝ·Arйime<30vxʎO󿻩̽al{|ljU\ma%QYc6YgC{U8ɋgϕ_ W @ ひԟ ?o,GL)y}v0!º# m8E~s̡þJdPNa1 `/|.(1Rm/T{w]vn2@f 38/wo}yFd dO]]{l36;|xwu]Ο N~Xȩw_}'~Z~\r}~<5rY,."-`5Ed%I+ @*  D{6(1~ilmE8;lqt63 d2>;Kل"XyGw0T8XIuCD&403{֌ )ͩ%$-9,I"`M$媡=i- V>6(k߾Eo*[\AmW 7c D[vC4՚y"W Sax(=~/Z\&M!C p^z7<֝+F{=%t{z_{g%=x{/VOՐ/0UL94,!?6MVf8gY'6.'lgK:?G\fn]q߱h -wm7 J,]}Y2I 7_e  w %]Dy舥f{oFf 3 d2mO嚀tirAәz3-NnIb s*N|LcƋA `UC' @ΎobnGN8{i˜ȫIq w"' TCӁm@hãt!c өCcML-e= H_e]9<6m[?(^ia.s._Miup:&d}R',HC @p1{%|5:69B9cWGc㷌! 
P&^}t N9yN`(RqCE~3N}U5vt{clJh2p"ެ|Oi^颞^J{Ѿ2T`hZZZZZl{k~Ӝlm[>b|pk/N/Ǹ 7?̭k6ͧ3̌dtB1jk_Jo}'ґSw+&{$&O:>t+QS >2% ^֙xϕ`6C956XڵEEEEEE#Ї>_~gVk]wb)JI e}#kݩ"/˸n-W$KیlS(Yg{eu59}aí,[~:1LC"*&Y뱲IX#?{o~,ڞ8:^zp:uD u_. MtܧaHbah*#F,bL0U2#5!EU&`s.%!Cݖn:E /4aO`b[UQTm,li{Ѵwxlh`Gs/0ooQ7Wq+N箧ΣiFC&54ic*&ЄGa'ҵ'#w71巫lल \V |1 p!p$$ɳl2y 7rcx4A!lcb'ې #*RGzMz(""""""p |]]j:;5~C÷j7.1{_ pRGGbU>ПOJ3Si6zM .! \a)ы.g1?0~U|[k?nɓ&:jo60>8`;i Xﱣ Ox @0osG POk,ci1u1;} 62'W<2"eJ"9o!?VjhhhhhxX"6<,w +--C>p`_M?"M7׶lVŭՍ| <`CgR%j5(4:2z|2L:435&>nf(-9џI'0 U<ʓ Wl81CbmڥR:ޒmW%7zT˒B6iY@@@@@-G`8VTNqBCpȅ^*5]媷Ģ'v! rEb_ "ȁǵ6{Y鵭+9  ºZ“+P^FU *3G17ݎ:TUU Pފb'^""K,Ruj* IT:^OR]v4F/KO!.TqM aq8 6Mk/c/Yc>qc.4ݴ~u7\8&?N7@-0~ӕº %v*>mx'N6`/Gӹ|9]zis9cHحb-ZH>Y6". _aYjW5ҍ9]&4dStقk#`C|K------;}Ixd'mJ[>B1 H?Js'G_ OԏuF?k07p tjqmK|~“1#4hrDUpU(KC?ɏɚy`[xA&f֓{őp 2# =̡u( L08A CBwz'ϼQDR:3v'vihhhhh+ԹagW}"_E\2 tfFؿ 4/zcdBzN)t7 Uv£/`2i tLڂGKQ&N5Xr HƇ5 ΩklZ(OjEuC#IY ׊L\Jkgk\vCJSiQ{pk#֥qVzy-OlGy[&ZJ⨪-rH")-1a^{UQP&'/s6H*S^e\Lt?]ڽd$+󗃵tDz#ql<;^/^AgK??pTeX߲ߢD,x$:~os]z,{A:m,+9^:k?yۿ gy:cax Y ]ڶ֬{Ѿ4dlhۈ(˘ކt_96n~>; o/n߻  -(?݋8]n s tSљ41>7vVϿZ|CjPMAqD{9=0x3n?4ӂf+Ŕ S=a'~ v˼@q eۮoE"W'C?ub< VnhhhhhxD#ǂ4.WбFG ϕ<>?!QgHbܤ LQH:~ 1 e:LȖ Y2^meA bZR~*إ*,4u*k>HBR>:bm و䳕K[L%ntG>,l3S=>UYt1 >˺~3,~5tQE=t1r$Єqv=7=Eg̑<6zzk-ohhhhhx8"6<]No|Myjx"!os/ICxŭBoXKo,o3Ciz@_{Rk AQq@b)*2:<͌iu}HJ( \]=|<͟~5;itq2!aW K JFh2'f{ 9Fg|fJ"S |r>f:+ؗ-ltxx }0OhEEEEEEE"z ;9sE^UQ(pq15"[Fz=@PPR1iC܃R&/dZ-E\HLG %%q^\ N"* LX!by`NZĊ9e:p1"=3uZ6 Z V/?\Æ#<5luʾdt/c7.]7Q^Ba^6wab*ݜuB25ߝZTӗ!l*.'cSX'/>GLD,3:w~۴--tEaS<`rJIӠS|Y5ʒ1WW~GxPu~%o7l aܧ ַpSUu-z\EJ?6G'6oߝW+̡N|s=-c,poqCTPQ-8wBr]MmT{M򗏍q2`{ںQt,1C|Om6--n:gbz|r0=1=^ē'մ٧<}KsOSk8}ԁ;NtɚtI kfS4A͞38>O=<&un?\شޗrjxD\g7C1My-">eOG$6ZтuK$e7‡O&1[jhhhhhhp1/)TMNJXduiu37RF҆UuąG2|҅ y٥yp5UmO.t[׷ʮE/H1&Jt툰O-WtLqbR6#z̗F9d鮧q+/LH@Χgob]旱A}/F3)-q$уiVrW? j(<ǜ }Yhyj̺hrhgtM"NX%sL a+,e@xkXx55K؋L-ErK+=#[X=ଳHO @Ҽ( p: [þzKFzIk3iikhcṇ~#̧_M#h5ۋZ# i` ߋ0M4r*qhTqC<a%b;]fh. 
Fa=Ӌk/-~G9ؠ3e֑XLw"6;ޘ:b }ajM9e1z}ף;ᱢ 8`cPu|mxW5p_\|+}k'~`̾5KKpBRxx0V+fLPm8x!OLᵕ4*r~4zb>3>%NuḵS"e}SeTl>6Ib~' GKH}M'jĖZZZZZZ~y GЙk87.c0A9 fױ0+{iSɋg C8zhlቃ߽맰6Hր׆1*>-c|`tdfT l֒WHO<6~4L~R8OQb$RL' @l!WXk-tP" &;řӣR@@@@@@G@?Nع5xo:SXtzx#sL+P(\TyU&:(wCN5|-7TK)EJÅd=#UV\' 5Od-̺t)+/MasuZ+*- AxGpi5rɰu Wtd燎:3Aѹ/dCYGl /H}[ l{nI"YN&FֵVFO͖usxB#-@HGj=fq` 8ޗyMP?gxSND<76+ӿR+@K3EcarAlW6ɠ'ə4wٴr$Nicr-1;7ӾцνAuTd8|2 hQQ r D{PhBI e|ADNK------>=^ ῑp űu>6W8fW01M47?T:l`GH Յ})Z܍B>^UVh3C\/˥ `rZaNz\ntY&й tC=Ч_44N#t3o@ ȉ+{T5ѽ tPL6 !|<՞>lIַIMF0baf4f;2x@өSS jn|]j/|ӑ=؃npY}ļrO)wlɱKOL _Qy6;-w1QE]HWT5]ZZZZZZOwcEyUT%d =Xk1#EテAt"2 9~N@(KHD:KeI^FPHynyCr ;>vZMjyڡ/GT4LGY8k]ӤbuyY;,rbUm^AfpF*.™ƥd\/2W*\+jDwv=A~'DasAN,<.WrP GpZu_g[1p p kG ~^ )jo XD׿c蛧.NX=2e<%WENtEj4OQ9NUzMN8qx, };x#A$C˘8]q' y/$be<&3f"h>U#Ӭ;:k0]EͰ]2lUJqF7ݟS-<h|[ɣ9Zaﵥ0|"{a@:S77v9dž1q7'1Waֿrl$ͭ89vz ~) s;x/`b^; iG?dHx=#`3:nFqOgpqL~ ujPHE}mj&-e¤T ރ^KdK^1%pZ[ZZZZZZ>zDyA%/:*㢜ʠyaVEC{Vy }4ُ2b_hf ҨtXrԝ~F eYp<01bD-a/XOBCY\D%>L |5Ñ$/ Hqo Ĭ#{]nvx&^޶ `'T*\F _+'~ntxyl`0no-˩HǍv1#LX6 e_ }ݞ؇2>ߏBQmrNA![XDho`?M5Ncx  <>:zvAʍG?y܄6qBm68'8 YPi4 i}w24񦉧LO? %bSl#|FV!B(¤%wƑo)dX8o*I)sޗtiEEEEEEш@h1 f1z8-IJ <|~K'!7hv qLjR7Chܤ5)wqtwӶ& Sm<0ҟ8Hr̜D,qUEb(1]{ bb8=vt, bc'm`17EAH0'!OsO¶2&mb2t0[;(,RkNr uDnI6gNNUZEEEEEEFm8wch#OQ n5-A 8+Lwo8` 'Skn ~m6gґс&/>HKk+|'#R$>`a '0Ψ#Ƽk6T/mk8 Rqz%/-=4L~ 8ęamcS:#ÙCf3ϤQ o*Ɵ]tLzЂ b.5NS_Ẹ\J\ iz;I~X1>ƣڮqI_nr4Z֛Х-fdӞЩciltJ'^vg2uZ)dƂs " >)(S6.4MsĹYl\kĐwo^fM|`}s\*3߃9k(s!{ x}̧Xp&NAc(Jx6^ 0 lB\v C9.{ׯC%ИƮ?ɣZ}W`?3F=| CG۸у xKGQܶ~mש 9)yS}3sc3oUQkB1Zjx#û8o/n%ʡ* V"L} јMݣN'o,(9c!K ŠZEH6rPc 8xۀkH`rUurM M C2 16dIip}a#fЕ(҅D^yA4vn\F~5c]Mqg: OݸW|$s,t\V/-9(WOV0Z-pгu@IDAToEl-ܳB{|݉DsGGX)ZDviddǞv%?hʝO t*H|ȅ \rbm?R26l(򖇏¨qwD.&pA^3Yޱ劉vX++uVȋ"opZǬ,٥?A=6%KFLY*y#8='5cV($O2:NW=ݎy.0=j=6 <'q2_@v$ʱ촺LC.W7NJc_m-sā|!XZrG#6ƯxҟVbG̉q d* /ΦK3mC\dc!) 
yRNj6u$B/i&m8 6_ڀ; Yx\[8x4~ec/>N՟KCxe"Fñ %;mm}0?u/..'G æ2G}0' xK-----PGfϦEE>RCr`|nz ^;>/|F~Ultz7:˛闿t!6)NLxKBep 4s'͞ /9T0n.n 3 soQSΉn#pbŝ>,K?&_}A*7],5)g0 *djs}Th y^JۧE8|~ʖZZZZZZZiD9&vr>c4%80RNuZ]F`X.\bM6˻|e8^&4<| ;Aկ'@nFőDt./|"l2HhNMC C@v}Xg|eYa߼3|^^82NxxZ>t1>T~D<<=%=D6żU'N_l%@">%QՅ¶"V=TGm""""""HEmxnkz|;-%q+i;<'F:+X&11ȍ0]􍩸*e:w,3K7LagOY\0q=~ =d+jY@@@@@@o3\JGG[{eb. s騳+DO `!IϛzLtR\rO=lMWM }N_t#)/l+X,(`ry𨑶}+T6x;Wr{s?$#o%Jŏ`cȳE92"rsVIg,s^F@Qҷ$N8WpAޡ*":u  }i -oAd\TIWV._@u’F![ cb+rc1TXRİRx}zxtn'xugB<05 j ƾCyY)uB[Z]0&j ?4:{xqc>6 V"fX M,lM-܆z5"}Bv4B_:;^j-"""""Dmxt}o).swRh\~+2OO{i>^>X:8L 3N"GU%9/FhAG 'uYNTZ2pp'Ⳉv攦yƲ۪d?9u,42{CL8t41sf_3iak7-c,&]:PDL,Dܞ(?LȣDueE˽')(qP C| |ѪRpR_(glQlN:&+Hj b6)3d,.~lEO.6;eHֆV1 q&ot3Ĭm}?7M LҩR-G.{\&}wLo0lVA#6MY|#łx.ًsWL~7ͭ)U Nx,0eS PVpi[ubcCMd-/@LSSQao8 4XBN]i=p@ǡ ito 0'E2:12aUIx-z!\5D[Ņ[9^N|"cٜdǪ'.ocQGprƒK V>01_xf~fA+ )M Sc=6"n{=s`hvV9T[ym2J#7^B6Lo 7Z""""""1DmF-,.nRC-|o/{\c} 'p3S3C3Fc#F:w[_N΍tҊhުz ]`a*!6(IޅX[ArX&O Y !ocv\%CWopg!,aieN0!&NLnArzhis/;4>Tx7[iyZ:~$M~"t3*,ϠWz: _~|䬔'JU9] 5Ge#&a~LJ7R寞=H[ZZZZZZ}6~ݑKnGt`\>p!!J3i\}$gmyQF'xt^'/r:ei]At-7}Eo-(][e t,kTgC.ltXf>SЊkze# kvdeǸd?D|oD_TlP|6G]o*5Ldm5ee|z/ O2g7ط0Єv;"y7X f,hj8,>, QJzYk 1vZޠ6Jsc|= \9ٱ/}ƿ~b|k2I6Ӱ Aꍡ?Z'i.;\N_k[:n #4c'ܨڙ P$RU7S2۽BC:{',ycC|p|Dո>gUC3 !E6._M}2 =xIjgKs% CYKUV>lEYXӍ_ɗ19́T*hvmhhhhx"6K_ɻًWUC2>8Wej7a<(!O*W10/|=ы أ<?ni:70w(n"}\`ɠ406< b'\?d.fIM6NvQ9ҳxDiRo,߸u Q[G]S~LVo O/`u(`}/ph˨{]/s;[ik;bg*vfP/#2Il>x5֥̉w0>dc4u{~lp X{ ptlH|Z|:vKfFfme7 8f4itS,t+zqh 1nXZXxc-@115Jqab>{Jl|q ZDV!!c$IyDsQ7'cF`>63g}l ~/d<ѽO ҟ._Ǝ<xwM8-q@bnGp9z7%1e=e40$d,"""""""p#XD/9\`&=T}y;$+aUœq@,-~L=MQafī+:$@ S] ë=>AU]|23nw<|BwYmt2ًx;|љ_9(bP"E VvxGeL&~VN߱,~ y;m'r5fl~wg& E)P'qPNE*ȕXg9O+zi;T[.iI!ret?Hn[bDZ1fl! 
) @7)eQtzV],dXu鉲KƆL]fvh?tt:'$9 btqP^o>TTX:Q0+ aS:W$J(ǓSk)HNJ+[(f |+ט}{Z@ywf6FzgR<0^ !k./ }s~G@d#腣iɥ:I Y} $QYaAt~O1eQwo4c_<`PG̭ҿ)s:<>wd2WZxl~X7~UVD+:]4O$"K@@@@@#萮YuFbE`e쉓;q7yuif8=3=fҿ: l~;w޲|/z~MVP*`?x͗7PAu$[BELOvKH,۲.UA znʤPK&m[C!Ç*W!׊1'iyAb³/꫅^y"=0!:ߢK'$q4ŏoOY|L$ײdP˱䄒Qml޴3M]$rqa= t:YF^B2v/b' EeQ 4Ǎ(5566Q|U7TCOQ|MBF6fMLM AУ=ǯC[f~NR6YC@ښMdmVmn=q%>SU8D˔C~wRjq2ro͠OȐIՊLpII=31~xlߛ1cښčn`Z0y,#J۷,ȴ[j O#H------?mD 8Nx Es"#XAI[(tg''ãfkoccNNąl>po\I߾p H Z5Iɡ6~ɡ`Q҆ek4 sF7E ap DAXy`!dT+0*iYi FNIg2x!n.$ke tJ\L"PIGㅓtOA|OA|ȄA0#ZP$+t1QyIZ/F.ʤ_yb[ -ohhhhhhk'b!EEe(@`Iˆ>"{ya&¼B,ֵTu+l,>ɨK0>ZoZfJW`O }=5p$pm#<"uyx.롰ʫx],xdזeezuD e.j/ELH'hiX3+[JeXH@~G\ѓGIL[kC\]\Y򑐻&nA)]d"mn}-ѵ˪xsSIk*-ǫd\ugWYHX6S?Y1uʜ')ˆKaGRca9J?GjL=7gqٮ+ƜDȚQ̺ξ8%x~z,}t*6ůw^6@S|rB'ʐ 9)CO+x liEEEEEE@AVy8pmyv"0FU r9'pFNFZM0 Kl+Y~ 髯]*<:+LCi 6ŜrgƢ@0WbhC1mܣw- e&iiMxv۴lPCU#ntR N^c`a.%&oE)}TqLVA%<yD[`S`Qmw0re?Z1ɸfsA^XfI_<>c%tcc,A~Z------r00J1\_ =CXp=5>^= 3*Cu򳼋:uy!lT9+y]HrdwDŽw(pFܟM_<1 i(}'GVh{Cm"f˨Bib` GtҮ-------cp.ŻaKt1NNCF@au}ɤ -W87jKF/P4 {y!nO׏_EɝF$xUdt3~w G,畧Crl}3S9 Dk,ëyY*nm#o&Vp^2, \#8,oаddDOS,#o|3șNѬ:c =jC|ƓI)'=U7y`:#NBk[52"5Ӊ67!ZZZZZZ_^teX딽Oه_HVu(INEt)٫D KW ^GSԔel,`kyݥ/?\w7ty jt婃:폕י]"E][wxdy-*ڍ6fNftZ@އ&&WC/!CU?S6q &N \4 G,G>_) E&d5mRr\!_`7]ǺMl;=VϹRth7aV6q%"[ o0[wNH--}ߪU\ɦWpf9 ^ƫ`xAM2Wu:Ԁˆ,$wЖ+?q|I 0%f(&8S!*hWXǛ~TQ<$b V?%}jf1d78Zx1Ekxyl+yJ }R""""""(FGG11;蠾c>K"۝]R~mZotaW099b;n|xn95o M^&4l~i3XlL.b3.gR!'1sА lYLywm6끅Qj C!w$Kt6&{pҟS rҌ;!7=1A}_ ;ז7gq4&¶}ak"?Y4q?t:;-<(c:pUNEcnCcI {=L'UYw]?n|j C!$dgXG]B[F^Zƚ^yg zC^sLjQۜ$B:kF]uc|,YWΖv ;%v{eM&þ3 3/Z%z͍WkoP+p̃ŔqFA7Uu]Rq/APN&i#$O`hc\ @ԶӗcX@ʵ/p:Mi,Ũ~)}+i_M}k/ h៶VvQzH?טWNm?"ʍ2RWFʾDƷ,aYGf'VƢjQ6ykejzjtX*,5A7= >3ƼGU<47ׯ/x5 1vf?emq>h$Խ/L?zdyԶI%}L0g6::!MGVɲga, Xr"3: Gu"(KaW@"u^ h.Gnzgޛ@k~\׫ZjI-K WlxH'd3g&3'Na8@& B q{"˶d--YX-V%ԭVwUU{~'U{u/`? 
?y| 5ʘ~N~@Y|[ʛZ,22020202020202pf`S!K֚QVNLtD!W9W}'SXG`1M6d65<}XOg]'Ke6f;#'|d r[Iv or&y0>prmnAIS!oĪO dQ6Qn,OH `|){ _Or8QVS| ws!}I@9NǶMڄ N!3'8u Oވq!t ث?(C"jpxi|Y[\=UNcKV\bq[rCTohǁzj /3oϸ+tp<&ĩ=[Gd30Ny)N݊1e]lQ!q&Ǔ'[{*I[8SgFգdl8lsqJWE'd6"yK QukKkʛQۘhKoyқJ' Giķ\d$(Py:Һ'?p%g_8Fk쾂$|~DJA :ʠ\ڳXN`me4IJP/%RQ),^'>(xgK8Z#ga}'C Jȳɔ+wOolb߶욅M?#^cA.O8inW\|7F8SΔ!{3pϹ`d 04 N|'tғ?t&~2"tWtFU;8BO, Oض%9$O\8WdR8O줞RZ|*ƆI,d@IY։I|ٯ='klc#J_;=Pүr= )QTߖ[9q%8gpF WxMwfv?Đ"qe<@7+Df PSx^>*rBOO\26f񴀇p%3_#Q,FXH*O񐐩7_ 9Ԣ#L,x2' m{XGHn rc1YcK֧"OSo)5s8f\ -_}ݕ 㢖{qaqX8{WbΟ{Xo_Ԅ ######省q9n> #˙xӾg|m͑N{bYxF5bܔÎglUg &Gn9g۞7Y'Fv ȱ.ڔ_Xg;k!:$*SK';A ~GA؟-t'#-?:meXG}?!8yIA`d{3|OiELi=@]YtrVy8g|o.e+oa\kҏBŢju[xJ|ЫԄLF1cY8Pd^K"ͳH$E0'ȗl?ūy OpBGFFFFFQO7kzNr>zN|~a` Xj^S't,9̗`]kEn5.Ӑ]a{U)̳Q{Y9%f- |o*mXjwZL_+> O 7z8۞5Cj$^2P>G7V p;LQ AU{.PwHs l L_I'cxn gwAJwS:F?MӉksj9߹os)CnhJyԌ Od랣R \M 2QZ#: VaCXL^ D( lAU p.-1,i1]Te\tFh >*ߗ(X>Dxž!9O"}$?T%XkI>p}eup.1+04NȮnIb_<^> *>ۑҩ:lPaǘM*Uf̄j9G ȥ Y܊Xq%j@fGt~c-OH svUL20Zpimf|zėyɝgbaffYgҺ9dIQFc]ˠܰu| zr e;~d!?Ni^o{`mVOe-ea3ooRв9Cl-a ،Eَxwhx@u\y]|,iz+71<ֈ D[b 6i^źq<~Cpk8¤[OhYc9hpb"Yu4~ {@r5%>ȮsY<+q+ HlxO2-}iX˒7.HȧY0U]Ysf̒2KSJ p7͞NZM;\RT 㜿gCs{RV}x S,A-h[&2:} Z)jBaVMH}b'*4n;>tRM|qFgxRh7/7Xe99<XQ6/˻Nqҁrc.PXM_+v~^q ؈us;-TW/`*)l׸!8XM@ԗ"a> k@؀&*!r+BRrvRk6eA{ .nW"nq \+Y'fl#:[p~%6LM_|1v,Tm+Zw>ib`H{2v.^9ZN -w|rO +8Ds teaHЋ'Sτ}6GwT~:_⦐J(H2(AvZRcdasTFgN >bZʎ-- 2202020202020220.`oz)QFkxMa Vη|t.赾!wn/3ػPѤp֌ }h'0̵!$F]nwxFi䅎MsfIœ) @'[>M'ú 0CҮ{'%cDlDZ~2>k?? &C>|ܿJl.8QU" mFwD,=@#hQE O36}-A?N0߳\s=]j{ 8y22n` rwev<`y} ~N?@C6\xC0je^ @Գ">b3BG͑fHƒ/fcY̼2ށ \zd`d`d`d`d`d`desܹ.jq9gӲ*䜖j$< 9iE i;79M3i-͞BΒm,AمV3e*ѧ笫? pGf[$*ڱpxed-,Hu |e8d)TcQYy2Pj7Q0c~Jz1Υk.sc.{&ͮm_ܨ $H7ʽ&f=X8T"N}Q-xBtѡ?! jIAyqYɏ&NތqS2 'QVP=S<( PI,:z25^mD8i1$$uH{ynxm~ha?t K䆏ɸOˁRؤJ&X{e?܌@{nэA ƂkB]Wo,2 u1y#'OFlĢ-7ۈB}B^F[ lF`)ߵ1ޯS >67̝jb)}yÀ^ pV! 
d4qܨ z<5)WƒSNW0JG˓O,Ǐq8 LY٠M2T^C瘔 j2*iAxùX%R^ JloSA=$o Ko5FXe[lgc }M)}(>ShȩBg3PITCf- :nG g juryCj0%n#}Eu2QKW5}l]p!@>;H# cܢ-tq0ĺg+Wڊ\cq*w=0_<-T]r 6]JjLyW=)]z1A5hsxGR\wAqj1B7=$64vPSƢ X[9 JgAؼ鞼^q2IPcu0&6YPߴ{g1Ve9yT9P9|d#######7~s~lqxnE2_6:ΙٲTNU:S?W=m.Թ3EE=NsInQ$ ǖ9PN zL:|ylݔO}j(.7A2m2dJ>m-6N}*nhSBO -#G<%!kǖbhv\Cnlqgw_>{σ)>rOs_E\w¨GFFFFFFF ̼ '*Úy"3{1yʩ(#N]:UNH*++ܴ;+KC}y=#H[E9}(&It™_,oPYS\4jx^~ݖ_Nϭ"o &R-ryz>0 2oz ;(¤5/_c#74oc=Yt_t_< 0bb ,K8gOlcj?d*x=5_r',t:n~CG;uE4$>p'>q9,Ƨ ^0X6Xvp<M胜O)|<.x(\f}P'!e„(5HeZ:ctuޥ K~Cy`⑁  ,2OwQ2230=]c}x`q }̇OškkK|͚:cZjulw6K6\* 6bclv Rf4Q iO%7kGmޡaH͞U ql[=k dh$mi-mQ33{N|g܂O>ygy)*k}:~?t)wO8f ,l.bkiS6Lpt#pNO<֤Q?@$*\ІU2**h  ^0:,c+9xaX??a_NE h: t:<}?#יyEa j U/+9rg" ps%[-3:UW{5pPotd ~bh8XOB*$fےR_,swa ט6`q+;N@ns-\/\@<<2L$Ao$8HkFQnrb+A\l ߹ ]oҸ܂_''?yG㷕O`M|}q~zW2=QFFFFFFF.F#f`^ۉ vz(7Toqy,E7?~)E}.cl`Nk@M\sᾆNX7[VG < -%o Tq+-^j3&nQ3*v)SX[6W¾;)+gʩSqgG?]>O\Buͻ#~G~=X7}#######?8Q4R>Ye I  k'y8g9ч\sN5nrjO<Ғ3Z5q0y1G"eI[uBv8Ifi먩7Z9~EH3#A-0H-mVw%bO9: CJPۘJmvQ1N-RtTzpT$m@;);޽:LƖ>;INhO|8?r1SD9y翘#Ck#SHk?1k|)ʩr XWf5_|`X  knO<7#y9vl~1#######$'.Hfed`=f`ǗbxݏU&Wu7[$o"$QpSTd6@{褷i,_*w~c/9O/;ΊD@*uCNC?iҳHƶyPqgR7'X_>t[n/|3x%ܖϭ>2020202020202Q2'8k8&p1E0Qp`2Ok̃,D\z`G:{g˟|j MCL$Fie ehF?x:x~>u.e~Nww~ Oeinb-ibi'D†^ƆW遞%_s'%2#-/6urb$,3_>,3^%,~wq3J 'TYLԯZ_Xgx;p'cyɣ~{1`e}4 ?u깡pBw @^WW/E`@ í Ёv&`0а`Oa xg2* !#LJ09.cnlx m €yeW5'~r{ʩ>_~mw>󿎍=Ά/9(hs~x!*Ԝ:gĄQGL39sNYI;͊ySGcIZiC)XQ'ŒkQVfvfhG'GgdMO<6{7"`҈8@2 Az<]̆| Li#ל;6?;ǯ)MrCG;L<&1LbG ~~X<&pA8HvOo>vL\!牕G,x%)O%.Hzq8 'h柵э8Z6!ܓK  @sO'/Q, >b@9?}¦772MCGB~bU#w :搱|Ż.{܏']q;'{~˦M520202020202pg`\p. f?. Gl#O+\f=nqwgI7m*pƷsym=666Or;Ы{BmBۂrm(#? 
;ujN[Ar̍Ɖ<nI*{cQ #4'ҙ^ ]͚GrS ţ @Yc/N^$F ######~brwbPU s?%-1B$磘#&lKTx* Ly%jlX#&JQXKlآU>ˤQr%<͒IXu>yFvN SF;ik9RHSG4uØ%p(Kg)AxW.|*M#?'ir~IԱ!NUG2s!Wm?C*168E_rƄ:[+GSK0 l3g܎D{ ?#Qi`Ihs &,ЕX1+CFæ.S>;u8tԲ0sX#dP$NE)xd'{(3foh.$}*lWa=+oq&t=a^'w\7c20.6TΕS p#몙?|rhfQrAdME~"n*@x9#Aoi ?Zs m$[{Sv:FެL ^jÀ005$̘F7(IA-qrƉ:^ӡC8NN[VN^zfff lFFFFFFFFNq&aL)w:4odmN(ݡ,QGu0딝瘜zJQNfcnݠl,3/-(ixLmY[b)Oj2&x}T 0oU!/TsZCQEcj+DNf:H9!r6Au8]U,irCX- SVc=5@e잡2FܖY|҉'U5͋5X˺ x(5D𔓬},uh8t/X!?i@f_tB @ 1C_ѿsC'>|3Pk 3E;zrYݎ^;1u+xRBZՉy${Z0Eeh}:REC 'h+% ϳ 1 B Y@f3?k(*Sم@>VƧቬQ- \ŋx<>{t8zʱ{rŦRoOrBЈ!P%% 1(~*# 0Gv?8)T\ȍGeӡ)@' U8O ޔP;&:y&d$nui &^9_,ev~=C;ua߈gd`d`d`d`d`d`ce`\ 9kffѕS!83?gq;N]:Ƕ" N|,޾{yWؤ乔_M|.0Gl#######7_fbCJL$s}@8lM4OdHKsS)q'=N^3u>HN2tYͣ T#u%(!'D- 5bi8(&phB* HyLܦoR7ee?tI~ӊ@V} P;-=ifq yOVlʏ -೼zwIZGLi!elߟ1g+SKkGq;x?W6qw~> ~Ltr/عqWs|G#<^wdMvk~ $bR¶ [ !B/ B:3yъf[mMXa{ؤ7'c{c#- `#3{FyLvX}??)?GFFFFFF{(| p?0z<IK?O>^Uj+p/ʝ@FHNxO \K6|BV%A^웩7*032H?Oܖ9O #AEQXkԅ(lnԹJZ.[3V;xZUI] >|L*9wq!!QS:Cb|(o'0e頂|5MעZH6(W$dD#4i?E//eqG󀙅cc Y'Np֫<FCjyߏܵPV}>[gwGlV (Ƈo ~2{Ǻx*WZ xuJ 38z"r\/TbF,0o{4((Lb0 ,ܞ*>)67T+o7Ԙ`FFFFFF. 
~^:HFf`+f۸ed`g`>Ε]./ހ[6<#kxug^M'Pl577/:XɍQ.unBa# V4'& і9hFM5ǀ ۀA``%-xZ}N]fuMe媫({\]EYǏ(|O{~BiX?ɳB$2)64ܔӘjJ|'mNvRզ;I;"X}=N12\e ;goa| 'ߏ\ _w?V=;Pr]7beNOl;>- 6e<\ӎ5[4j2b{F¶8¿ 3(9\f{nO ig2lhCMDD,=2 "sj=fj`(@e"BF*ofb- OCP=>rg{6O8|s]=M )ح`Zm3XSmsݠ".+־m@T5Nnfg߁o:ƒTI/Sy:~ʲ~jy8NȒv_p;}͟|\VFlO,ʛkTE暹)XЛH)|J'7O,9t )oSQ(daxG\> 2:z@7*+"S63tRm >V,aC$ʕE3&$k᩹7ø?XZ:8\k󿮋 ,Xd7M/'72p g%md2"] 6/x.˾#/~N;|A߮s?b}&lT9YRaro$,Uk1f8 nM?x<ر1-W,{}/,[ ؋CVꦌu/Roʼnjb9 k}]UE}3?Cy~e]o l cH8süP/Oq2yQ=sb9 r_%'N0Mۄ, 卵bʒKX7YGki`0>qSUx)oVx$qVӉc:/3(O۫ 96vqsL+w9,MyGN8\ PV ^8Y> uOS\JM>ߣU|9[}p}chN@Hg ,V ֖mQ|;XCFtSִE#Cyl/E`7m+op-@1"Ik NτщcNH/|wm/?e/)8r?xY]Y5 `r012n2pn6]7bw N6_+گ7u5; ߌW2߽!O,\}^ -ma&^x;;XʖY83X" c\R(aONlƆ?wwm۶>k/=DϚk l L_]/İ0MfیU-Ly9%m9z5cQְf]p(,O#gh^mx{ 84O #=,=ND6qVT]Őv+0qBerH" H<剗*I3#퍬3rX,MAm?>lVx{;~`x:[~-7n^5OO*mYHNgg`/ح_H|Җuղ }pLwfBO% e^bz.u}`3N燀1/}dBYvsioydYtHӮ%?עՙd̰ӾB7M(a.SeϮ?csxwyK^!s'\ϙc uN(#=AcF\f+Vag{<&XciTZ2^WmxЎkuPlgaj>lE<ƭ؃l`0!A.),ǣ bSc¾f!<A=G\K>fM .)v`n!g2q<\/)W~ 4~+l$CBI:jJa9fT4V|TIE̅jy΋'u$'5^ok_?Ya_)888Oȩ &þމ! 
}W ׼7T8%Q7xrA9:SM2́j椮hGlnQOF[x/+3xmp{')iwt tV<޷|* Gȕ%=M}Rf5>tr12 (2+N7A#CW6[fKmQӭV8b3{ /qXGMn ,''~|˷:'O*??YlM~d`d`d`d`d`d`d cӤC02pg` X\<׏H0» 30&wBm؈/ar|z\*S-yfѠR \ا}펋 roJ_7$#YY|S<9,fNJ>>yl%wD\}gЪ?BG eIsE1^gI^5 c$ -^=nͷhO\n](__IޥM#ϥ<{[*KՇ=:gPIy) A1vh]Nr8=,AQ@I3%יC{^p.qp^㹸8֓4Koې1WW0¦JM\_?2ϻljlk+p٨Ї_UXXU?V7ҿm@@IDATb6^izrh[c"Av9P HAo9|X%'tm#a@r]zg{)p@p*З?R8j</gnO!ނ`ģ &s22֬iՅVk/fgyxj(\K^Y^?$szR?7; cڌ x @Ŋa \ p p\yߏm{b"se~Aݷmxԅ80k kiHV턕MlA @ndebmxJ`VMff~5>(>I힨.Cg'aM)8}.eaPwcv3"N>IE:s{צr{T~}?P:En-?#Y ӑpݸ0/ Ks(KydDqyƌ71wY(4G;$Ff?&QFy,rB"h־ٺHD'k19 s$T<ձIOeS>fQS\q|hD!xFlP6cvfE`&jCC;PX,L.yoWs}ݳ;?o`_v#Z} ~$ >2%qP~>Ж/gV,YDN_@KWr,dJ $&)WsaO2 0ϲqbg*kZf`igWXZӹGNk[e=Co|Mh!urTzȃơ!&Kqjȼ** 1qQ)Zc(8z_uFYO n7q2cyTP5 9S= pSؾM)?3 ?w5ֿ /8YqgB^,YmR `aU K_e  $6%tQ@x[CyfY(SOlbhP5KOSi~]Bp 8LS M&d]u8m-M,}ǡӛ mL)\")2_y򶷽wo-βl8rq\~};a220202020202pg {kvaǡ22027^ O?exɓ^SN _^SrZOI}$gTwf lnN1[qJې`>\b3LCEm^H \­iqb ?(0Trz3iVC7qq; {13VIw;?wjJo_)C12020202020202pYf'9K=׬ 0YCT^ m395 }-g rӰ4μI짬ٛ}r$|YsX'0Knu^ۙY(L 6+pviP~Od29dߜtDtwtRNH?c>Z,'MgRcBijjA 8>$iꣃned*[\1峟{a9uc>0Ca'_y_Pƛm ־!5 NM-t1/o0 apHȶ C8.j9*TK}1^ gGns֤sc۞wUR\זws^\w:u9囁qoG3pc'cu=y9&B2/{Lz 8b㚼SϺhG.<$'l 96=V6Ou`@^ n{ 65nxRB'2!mKjr+6m%7AmJjpAq^윿4'IyK_v7ChGFFFFFF6zf^vO|0Y3 9tB|)ms/&,Ot6k.mc)=Ƴ/k|æuGn._qUȩ2=.y! |㥀\v4iZ{ ?<,}6i>h#^WUPU. 
ZHT&X!PK dglS }jy|#^rz-w}t 4 AsQ# egM,e>h1\Qi6o[j5ȏ>ȸjU&1 wIeB֧䡹eY Oz,̗д GBڧc2($PA,kHE_?.o=8n]}QFFFFFFF&B \jX`/ed`di3[=TrQ kO3VzQM VaiЭMm ';?U6A|tI*bB2m`4Joꐣ&o6eۀ(gȶZh|>믿Yo^L_)< \&x&O5bLlG*42ĘGxT>gTi'bvдRfl8)sXҰdwN6iXi("I.o_~谫drk\ 5!%}oiC r6t2ck7Z%GSl01mc,DlSeyR=ހ lr$ŞvQsZd?k If?W_J٦MWrԊa2guLχ؄x RTxI}Dc3C+2~O܅X yϕeH|WٱOn9gzsy~,圁qoƣgs522p2p}].Ա:>H\S|*,䒼_ov4,T\t)}8y-k(2BF`F`izʂRjan)$GF,$tma+ 2(>nܶx333})W]uwd,㮝~ϖn{ j22020202020202p30u ~.+k([mꕆRygSuJahe%5=g7҆6$orXn\o/F)#h N s2CU\s-Ku 0_ŕxz% UU!:OlΜp-OM%kI> =n(%9чEMҫcPc'JY™ ɬ?2p:il#O:6En?%2Gw/mQRQ?Xe"B6,(|ʗxHP4E4lez>[2<]ZW~5}9~˯='a420202020202pyf`\pyn3_31?^2Vxm?uM DžbY@.{tbB?Fj_2xy?[j⎍.TR:!RjۚM@H2SXE{yTSxDC2G!lg9 hK - xMq}wKX,NAFas^S[b.'5 q-yNlċF2^6Aڧ/Yn{sZ2qEuuFS!3v:'[;鄜E tP*7XN̂8va3-prK;a"l}v|54.ew;R‰:y'?T:*H9nSFw[Gb+Bs!&8.؊p ?RjS5x$'K؆²9%^G ,׹w |{# 1Uq(<R^yroŗW[ۿ-W]ٚ e>L0q2O؊(##X­I.1йkNQ?HjKoRTk4ns/)o<=7Y/h-g͵Rn_`4vh#~2d Huu$WXQrЖv6b#1m$|Mf`#avÌL{;ϟ͡CGQ=G]2202020202020202 lWƴsJ+kjb~FyZ[SS\yҒ*DPa^t2s4>cKJ.kf+d!1.OYUN'%AtV޶`ӗc3N8ȬXc-F(/hsPIlZsrX<Q¦0GJd0UGrMKmjޛkzUeԩRUJeC # s (@ (6\qbㄈh_ǫN\n @B2WT*y>]sjH9koֻ޵{o m{yPz mn_5됻Orφ }GBew+<f0j|0c- Δ lϵq<(l9~ɬo8I쬅'"F| #Vb/SÉqkJλ9 ?`z鯗㎳'<+{V d2@f`6f ϖo}h:3 Ρ= <}WQP@ j{}l`Qs&pZƇ}Ԁp.'Tx1(\<`]0o^ǶFpW|S"M 6*h'uiAx|6@y81{Ab+'?p\t>ӵPYN9{[]Ge۶{Ť"3 d2Sm*jh1ی1Ĝ*~wR cedhWmL~BZ9=cfy> tm0.brz"~Aev!"4  \Ff籄5mL'5WE~_c3G'(яOïX _n C`p†qC>\[?{Ӯ?zEvߜkm }(75֥="_QDi80 TGPQXy3O-׿] P哿r-w>!g2@f 302Κ̾g``Փx LE)3P3; ٱ̗-Roε.ja+!ð2wtra9j<i{taA5+ epa(FfC_?Ѝ M>H-p5$o6ECN=ٿַ]I߼:=ÿ&"3 d2 mjmpCm)%S^ 85[ ]̇yíifz2fu |y6ݘ V! . 
/l+\a'$=Bo|}/j~2'!gmcu^k}0|Y@lq#ОoXHD,˼.B50= ד͋M3F8Y]Gѣ{ ?ʜU '2n7!PEёtE{;n(_LfD;+.)3XycUʗ1L9_:6/d<~"kaX#TAtЗE~W?eɒEz5jY3@f 303Э gs31+) b&0ǔxb?:ǖ\\c} EmA?h?س{kSaRXmǮZX-{3r3@`,C5Їй/U@ 7aI-#@6ęAnp@< ܕC̛7/TŲlaInd9 1#}ԘbI2 &|Rي'>*C̟VS"Ne-#PdaTOKVc28lGB.-$PC{a  , xA8ep@zXþwe "io؏6k?15_n XqNd mć_xù)B fOXx=wxc:+cvf£nO$ZKw-mGfKs 45`hPd2+2VcsT\]pdnwx>;83 d2JFʳ)d ̵¸-vYt2]B2Yv bNPs}mG"pUʸk[ ޝ-&ADZ;vbP,ՆY4{- &ykdDF?auڱTy9`,>$=dG.ZP< w(/^ Ο?r8_w) lcs?h3@f 3 ؉vu>ɜf|N+@ ;]pNхѿ 7a7{p%NpGfB~CWkFy{ʖ%~6{DaZշ56H_$O:Цe;QvP G?^Rub9|#Ɔo/۷Qڸqs "0jOBȒ d2@f 23D3.Gګ"d .xZeli{-̱ւݖޮF0у%u VcxBN d|)ָgwGEQki4ǝu(ںC,pdDx@M&xio. DZIv!C`/&]?P>V?轠7\zUe2@f 39ҮJ=Y#\Sʘ_Ja23O6It]޵d2 mx䈜-a峵Ux xIf3Kh=-o5Y*ʩpPT:9ʏUBH׺8/k{l {yx:0[X3qyg}wE䀆;4%(3 d2.YϞz=:{#Xq{v.kok[McvMn w렏Ÿf GW{:Ćڮ #GSjC1` b~cX~ܐ|[mคe^JS8P6h?[>pl&oW9',#V(ozEe=xr9cM,@f 3 dfs欲 {jfOяbZ6uω*(͸NPіhQx mr+ GXq:düO |C-|h dw$ua`ܤy?Bj>b-5v)(>\og%E:X~=~#xRO+k1K<}Py]'a:n?o۬LޖOqSb`|Af]~P׮g,X4{~pFƁ%셩AeSʦE[(]n`JCZ6A. :9sلLܿ|ѡ|@`/2|F=r #^ڗ#8pe|\w`U;K_aU3@f 3 f?[=V{lA +,Y? h3WM"8-ֵ@"pwFXA`o 6jH'{'1hԐVWˆ7L諁K+䓧dWXk#d; IBؾcW >I&#^`Mc8"`.O6=paS2ȭ4{a+KhC>??h_^}73_?.=6 d2@f 3 9 p"fގaƬya9Kxڜy"ހ.,<+ =& z[}RGb+fUJm^=3W#>Z4?;1Dhh˽4>ڍGɥUwєUO#@QssG ^@?`k'>2=웤?u.v=y>X7! ~vL24Y)}y@ɠd1Ɓu?V IKn9;0<yq-cB[иy oڹpV'tl#o?_W.fd2@f 303x֏=5,ٛUjB.0J}~񍍁gπ:x/:Il,H*9pԻ+#=.? ?6 W*gpGmt"eP|&U!$PA8֡iĐr$tCPOYp8Uӟx+{lc>Un>Ғc d2@f 3 &3f(4sLb%ft!#68?*(KTA65N6֖l: 0?šNÞsx qz1;ڞ2 eV' ~9glTk4cN.s.T{R~2vxb0C/W+[c5Y[&_dk vkO)׀ AD  _0Zsvg2jkX0cBEdRt HI: V"Ċ_:֘`d іpf2Fuǯ`dw(+V,눑?ϗo^1 d2@f`vd /2;Qt'f}2"gSb}. Z[Iih(t'hArߢ茰̇w+fooj[+ @! 
IH^qx7* O+ f8#(qֆ8 'ػ]gY۷?GQGxo+̲ p3@f 3 tYX%yf)5|/fPQsZa}*m 9;'9sLZe4w%S6LDmpp>: NlX裦y1A:tID1+ձ֮ra-Thf,{G SƄFX} QTOp'څQ/eր/F ,`4䓸< 9^'5x;׀Y}oOG!B)-`内v`*"wk(CɚM d-}ё~qe畿'K_r}@f 3 df~Ff03Pb[ lCQf%302{|\_﵍ŴX ȼ*cmCn DB'VdwQo'R}+w^t2Zh "mt LjX-?Zܲw'8pW> Q57{NEo(??nkdAW>zUyӛ/*?HyXzmy_:i) d2@f 30S30rM/m>ɨ]_C .$\kOmV I0Jo=yF8lPMܭڞ/d M0 \ qoB-"67.8VqMh[&ʪcT8s65ۯMlZl5@7̫XtG}D>.Кe/]8| x8_N~ ]pEfxPo3NA1hJ5U͆ٸ3Q6 BYin`//ih!ˆ0Lt إȽ >?3boOF4Oy|9r|4rټyוkS-['=eO`o,^wO^pٳ{Ok_{qg2@f 303`Ome5G(vxY;Y2/[v}6p.Uk Y@;m4}"Xga,tB7?Ʒ.{l:N L F`ؠ߆vpWJnHnMfZz^ ;sc,0(~{۩Gw><;߹qǽ/`yuelll̳N+}΂a_W_~Ν|S|״f2@f 3W#}نOGc|tmI5/MPc=7OhnrJܰ {9G`N|W|Iah<{dž_좆T) d2@f 30;20qUƊq2f(ѷ&4^? ͶiX Mb0>gq0u\=w ]آD,>4}}[Ћ'E Qq2nv19koc ]SOKg:1/92V EqG> ̸ |/j:+jmx?xkxxQmV5gg ^2h!2%+WF oj:A ? L`kF. : ^7xM F7yO/:,x` xZry"xG\ S娣-k˧57^X> I d2L@^0^3f|q{"302p߆mF-± -AorG:/z҅SH#xNxgg{ABn 1vmƸdtZl_0F=Qf nh O'V8t ؏9圕Ar=˵߼>7nkTN<|C+SzF푋?ϗ ̟?J9lg2@f 3=UXcNmimi0>pR&8 *oM@P~ ,PqG~`Xk\ nԐ?.W1{?NR'}V9rD29ppoo nbʼn.8X;|X`sv]fh՜T<?Cm؄!0\ 篾lJL6= GHD<a#A?a+&_$F/1MgȂ) tq-Vy|^(Ayk;`ĘnF}S׵C=o^Z2r9; ,X"o?|lGcz'94.;fsdeϨ4q~'!b1W>#B(FC&&񊡳sewPv6>d`t=e}E鏗 W\62@f 3 ̞ ԫ&wې1JCm6Ŝ5 _rCoqbXsszckQ@1hch!4O`#fHF8AM Ca:G2T۷Z^Ek_~9Ӻm;ʟ|W.*U@f 3 df\FʌU(3pXh]h]\h!K~akXcX÷^oTşl 6+Y~;1NۆMFtKQL;ߘ0)=lB*jRѓD6<at {=^r-l,Ѷ:U ] \E WY[G(ӹu]Qp 1`Q6n8֔w%9AS.0~`,;qkaМNظ]MWXK3FzX?CuRPi8ǂ!pa\)ka8iSyeW,[/ZmO߰c/㦍[e玝(x2VϟW^ꗖcYeg\u "v-'3 d2)yӔȤ9t3xΜrkF~f5[v7n*_ |x%@zm`_}v!"'A^h[GNOl3m3Cd^V-m^q@^Ey!m  i5gq;ɿe.);n=L Cܹ|/xYez<xa9VvSzp=>rSy+^pd: d2@f 34f`p oVrnkMW!FȫDr̀wptX * +K lByzJ!F{4M>|gftUa fǚ\(\sx gwp̖E$ ? jv zvBA>$eMnlCxaL&\9c natC*vq@o YYz\C|8J*ƠXd8j To|{/h6R3,^hhNuNlP)p Q:nX|u2h%]tIpח7R^ʞ([8⦈n\gZ𪼃<ʫ_rŊwUu-׿\9̘2@f 3pe /8Ծimy"/xړmp7xl`aΓuŻ>ÃzSD[ y 6cO X@lڄ&w {xb88'M̓DUuh+Qx;n%̹moC<~RcbEExq|̮鸞beׅc LxƗJ**[? (R M'۾m *q>UnpѩrtGَ7~կ~YYbAi d2SMN4{{Li83ݳ9Ǒg4)(\b&EZ-!,!ri#/&vbn]z뱃'&ئHb!$3Cφ| 3;A;AAfBl {hrnӉoooeLE;Ö.)8h2v_`Dv˦M[7<`C ڹ.~bS('3 d2m}ō0f(h5f3@Nh_1{ƿ ;I}196E<{8i*[-??j%- >D,u㨷AcU'B} !Gr? 
ZUdM/ïJã/)Ae6 ^pm1I(Syi֪C%dV6؝붔/ :ksc_N] 7IwDYaϟE-.8c\:zta.&ˆʎ9vw㣣]|= .:˖-]߽EgeH@f 3 < O5i?2'7̨/5 ܸfc+ wc@mb.>5]b i"HCW,ؘ([uwF\ڴBpm6`Rq .sm 0^'nЩz)wxS~yxlJ;~e}p}ŊpbKY/~{V{ Uk<|f|U-Zx@e2@f 3 <㗚M|V`Bkz5t:A;NRc}L'Q! b?. 9؁k'!ֆpM䡓~v2q2PG& z{rZcWqܮXð| :6sYM}4% P+>q 1],]}נaZ|81KMWcS[Lm;K ?ʞq]yϡFXϚ 5>M3p }7m?Oql lY5]յB1kZ] гV PRF00bXhwqr͛˦_{?jEeuڇk-|/+QN?` 6o|][n'tO@ ΧN2@f 3x&3Y>/ח{<\{ |W WFA7vdܕizNOM -^`;ҹPa#ۇ0G !{Q{|70[.ckJGs;YjGzES;N/V_].хGt9g>i4 d2@f 3x30i&>Wqd̜3gΞZCĵ7!ΣNiQY-ixW~ 0/W#E8H.. ΐP4Ӈ %O^Nc)t 0]Ty X pߊOsKNţ8|qUA}ȝb|LPAr kWH'ܷnYGC~>\:vF&qZח  [w;;Gv>Vyٵg5' zm|{5>> BYN-s,Z.xź?S:ZwO|z%7n -]vut<O#x^͏~=El@f 3 dd /dJlxW$g2R{U{m E_ӬMNq5l`v-'r) IݽM=~hmiM :Q WV.c 8lz׀sxO Oz.^zUWY6n#?#o,?3̷d d2@f 3820aOŦbuLh'ɻѱ9ybO)N6^C.'=&..?Cx*&WψTmP,sqЪ\TaᓃAB^nv@c'pacC-8̇o-ϭ2幭88:b^Tyzb@(NG7sKL]}X:ޜBN>մ:+:dcvrֲ[ʈ=_,Ś.~cZx9,eݺמT^~?S?J@f 3 d`)Jl^px6><2pڍOk׎C]wz_y y!&k&{˞-7+H05ζgtI|ױ:Ƈ 4>T 5Qw7i6qsY+!ߓ=&[ߺ\oo|ㆲs'lj' qO# 3@f 3 d _aSG|w{zSG]€H8b\M{7O_a6WG|MַРS㦰?u4űOd>nP;{{bCÊ tE=l`\f^@c]).=Hga!2]'~\{)G:x=>{20K.=Yh?|gm?i4 d2@f 3p  $KbOxȞ%3030nn([womUw,1EzwABm+v!Ѳfl[IH'6p?k ]ˆ>E@Qp1-ڈgnU.*ʃ8וK.F .xexO'fADcN|f 3 d2YI{%6T93Q3Ɯsw̴Q~4kPns^F`:)o p8_t ې-ڨ}~v5fS+Ǻ.z *Vt:ʧC!휻ޝaһPw1URe}?^èws\8+JU8ޓjI[<ʾxǚrMX6 lIغ* #S5(cϓN~).<??QFVm!fe^mՠ΁V ÞyQU5! Aa})r*7e0 ضmǓ%/9|'eF@f 3 d߲?P31 l2nߩlLAydw߶ܷq+um*] >H h-l.R!Md0H|!Fz'H.!#r'mv (T{r{)cSB۱w~lyLsӞsby/,oהG^6ldE^o^sSyyg˗LCX2@f 3 ̵ x@L`b͘czPn7 8e+hKc3{gVQzigCF肹%ȯl:7 v*:ޗh:Uq@gVb[^7(-.lzC߇ dbդ4z~/4>hϳG߾NkŽ$q?GGMFjl)!H;]GJ8b2lWdԙpkG;F`'cEGфEjE?1 aLaOMǫr Ǭ*^wzIe=e͚Go=xE9USO80De2@f 3x'(G *c>Cu'E 1Iw$r``==|k<cop5!WhŧQeӦ-+_S; ׾/,] %83 d22+; X؂^`,cv}pqo=, :=tZMMF'fcg6  {q2i|Ȭm L8ڻ95lH%զ+ Nzd\o:yUy'>n_WHy *+?S:j d2@f 3f_jn:m6:Y2z(lצV:864>'P}߸_v7h~ 5qgrĀ0xH8] ÚO~& 8ok}cr)d< ҡ%w V_2h-~$<46 zWg? 
cELuE;j[_Um]mZځX.rCvum㻰GLZm4>|ȶ?*;ʽ(G 5 8֟`6CUU^ GA`CmX4kυ!YV^õ\U֭{p=Uyy˞@e2@f 3_Pg}Fm1>[)0yמr= \j{͞ XpmY>zoeZDZ;Eg{@Āb8P͗7 g==R莇* ;,o_= wqGW]bǖwGYѬ]A\uri`*3@f 3 Ryc }6 sۛb%'oCNѫ }F<[iC>s5TyX yȺԛ"/Q @b,x\~p; $G$Pjkg9t@D=E41N;0tɂ۽Eg&_Cʘ\crW7\?wޠ) d2@f 3p@'P(eX8dF23+m^/j+oMv.WMAN`XnO+rՃ5m&6$M>[!G\5PbՌ8dlqi*ľG!Q9G,->>"<E~iЇW. 3Q d2@f 3Lg`: LhZ^C9}VjBq7_̌I%=t(z~(Hi6ovi gO8_rƙNG@f 3 d2|)AR"dNbaI ̠ ,[0ܰfCټsXGƀu= 8z8@@c; ]?՞&AKh|dsdޮW/o`2`H6`d2k3O('tXdQyW~__.2ou!.\rǗ?zX@f 3 d2g+#O\& 31mG6s& 5}Q q#ઞw (~eS{4M(W@pa{ 60`h#up]0Cg5l'?^G~&f!>@hAsOLWZQ^vy|,s{JٽgOYс;!13ϱd2@f 3pHg`d>C'8$؇gcwՄX}MI2')_A[6衄 2Q퍳RkO5= c /jB0_06鸝+(a2$<bDg:&!jW{ ]/47$4E6 `ق7 %(\@` 3.`V,AKRI0<~1<ں'2k %e~l?ɵ00kLP\#?v}_kq|X4 g`ks[C/e<0kweٲC3+!e2@f@2ȃ);u9bX2O%_,Xڢ (}V],QFdٱe`Agl%{?.GOƄpj7mǷ7*Xpf?R@f=]N/|v2p‰ďc@f 3 d2j㗙$ LICla9CoWj>"jmB 2n׿N@Y l VXJ`Gx?dHЇX!oCC]#l4&V?c;դti_?(b=?xu szlq|U˚M;ԱG¢ǥ_]"Zeeb{O%ױf|r<6֝JZdJUQb5I#o1Z6bk< 0>^yK_\sMe[/ MQ^03@f 3 /y24hgHEY23-rZكy]kWZ;6څce.aa Gq+~vlNd  !H&1wPDb72ފh=-~bi^g {J^WB._owX-?_^3@f 3 |25^ᄹN1F2E ^D\ئVc5&YtM29|_azhCmj 5j?} cLQ[ɘQmŇsvf1b30R`?D<6?~C|S1øZ>3-eCEhrӚwՑ[[XcamGwe,n\^m6;/elDy ʶ9Ū-vɭ&,b<䎑Z8R`$h,u&O*? Oem/}{j2G~@f 3 d2@`_I]f`/Xh{C-4{83pe`n 6mnC,}#ʑmXydӞ8teؤ]PRyԓe| lO#Nȫ'Ӿt| G.nS#&7!|]p~?|iy)oǛ9^&mx|=y'ǟ d2@f{<{Mb'L#:ѵ7Y8ɏi3K`\)lb.CAmo'dࣖ]x 8A=zaz\#>c͢ZOT[$jYS`Ї+TՔUsJ8 H(0snr&I빣VѷVvk׎vu¤b'/_:,r%ǔw/, va7k,g=%o]n:`Ӥ.|5n 84ȤRco*;qs 1>[[ZCmcme#0 *n>j_,5dٳuLT{bMY@ ](vG. ԱO[*dQ%8J֦.*Vq_qF9keʒ d2@f 3pg`N{6l+>7fD!6AF=a :"Rp=H>Q7a/9qiP竖. Dmq4>ۈErjNwW>bS@@TCѷ1Xb65{8A67y2؈ٴ~37>\(v]OQBf50*]LKEF 5;>q몲ۋ<< Ćlċz^0鶐=WXSA&4ޛi20F߬g[W -]w !:3 d2@f)f^)2yf`eG\Ͳpgl-/>:I>Zs)oܺβf˘N4Tl+s_'e_6P3. nx`@GQbe^R~=`J$2 I~6\Ud5ى-ʓJi3@f 3 d hYx_/lz.0>z Y%}!,|ɼAR@| hq - u nⰊbژ/(<:F,}xΤ{:YL}0̀ύc\ ^9XQ: !__1.Xœe `ii؀exQ__V)e^`Qѹũzk̃jok\@O8\ra)CĦw`I:ЅOo:H d2@f 3d`’$YQ[`<0>Vl&#=c2p-~znĂ8F_q{\F2% @X<]) 揖 ×/s)&!Lưb u $.;iP{hg :>PD*'q<Ůia`S  tJnxD> d2@f 38206n5O1FN}>*ք .4ہcҝ$ج48"E0 v'POyjlua*OFX}]+'yT'Ԯ iQ}0-6֦ 1DNY,uv~j+eՍ+҇ѕG\xq_]$.X<^&e=ea#- # ixvUbQM_f8 >#Nbzg†v%RSf{0Ͳ<\աtOifF!! 
K $?6HƬ5ؘk/-^k$V(49H&uU*ufCuws|[#; p-D&' jmx7#As"$@"$I@ 77<)IF-l3 +NdQ ;4U1.@ړ}E~D%Kqz[:(pe &% IQEP5RaJʩYe}Se'‘||PT_34hox||>{'w/aQ`?UD HD H{ԝσtr\cqXˇ^i8Ηx.hZG /+:ӓlC>ȋWJ1hM[?ia8dH"1:)Lp Z /3Yf3+a/a5p #2@)uZcQT]Yl#l\?@Kqz>h)Sa<[7؝;*PDu-9$F)M.ɶu;.cW8?h#f c+4el1h%,}W#7Er- @9=h5{'ǟm3?ues$@"$@n8q4 0Bl؁7dKMIo,0̷vOCY@ՊmaJQ$0!Y6J(j ZwF=>HR=`EZn}5W2Ч"ʹĦb,LI)*Pq?CK(ZhGRg?0߾즛Qz'l=cxIHD HD H^(+'6܀[ wMnk/ SkTbC.·m潹/\"eLD<80Q#DszȇZ~̰d.rsXwo}A&/-| /5#s;>ns7M_}cxb=pcѿ$@"$@n8n"߶۠Mtvjv|nLU oP0@raݐmq,Z$}2Sluŋ!+2@=4$P1n@i(_P]E@(%5=W.0UE !: i!Rwg/g>ۻ@#ƕ 79}IMD HD HN1MX/qWJ@IDATf\ }/"k=}ʄ,&Iw}uM޼WlRd$+tu44bRe|BEƤPѣ~pϊ}+H Amj,k; v5LIuWBA"bAFfDy6]@C$1`NbѡsHh`K4yӰ5u76 x\Cө8Ĝ-㣶fgl6q:Z~m$Q%<ݲhk} cOwq'nQ1Ro~R潑ΐ2MjTĖ1;!^?鎮KaL\km |1fG#NI|ϥ{޼'or/|V7tց;w/?l $@"$@"p.!Υs9l@Ms C`g}w%I,C/DC"46,_ܠfaE7׎6x+wxWr}cAᵲ7aú$@"$@"(}kqYniqTY{r /dLbi=u\;n҃{{}{H=qEܡK=Úc\c%Rͱ+AG1v8<#v+JLp16JPY^Ωsݗ5|x.O C/g^1Y)kUᖘ-^ca`uv q &Ielޖ/1L`]}bUFfVIhLA7.T8ް]HLDh\o鏢lzty幰\h5t {)_}|i5>"2@"$@"s2$VH`rlY˾9?YZ)M#OW>yEmg%$PJ!3X* R`L/^U ݾ[*%TNm*d*|JKYq@vN@3U%6Ҩ˷{v}_ i?/ӟ/Rjۜ7G2cp(/™wP"S`{BlΩr9(PbXXwZq~CbӰ [_G,$}QB1 eQ(:.4#bl.>o߷v<ɡ1&cMZW5/Q݁t?ފk4Dxwwśj78/,ʣ G$؜אgQ81oQOI4%Ksa_}&F_vv㗹 vnmhNx*&@"$@" ȻUxQ2y~`Iv]?v=׿݇ğ߼qFD|2TT(CVKxoB~ :Qh(Q$-oUEγ R\=4sie)ha9"7(5ćt@'K@m8Bc`%]^b RkX6K8Z>%? 
ua ' 5sE-ﷅ{0 ATKl2ۻZH;mYA'4⪜msʱi$kuw3Nڋ~ePALez_C.ey`_r.S\Q>D#63㗾*}8gff ~oqym}?, 2-$@"$Y@n8K.Ty!y`0ۓξwEO~PS$@"$@"@VNތU/~zets¤zȼ{k dtt }פIp/;hu}}9m[_sTB̙j=y8V“ĉS-ցw mͅaɔ%C~X3 t\Ԓja#Vh,QZa#q}Zrcd߉ViF9EKO ߭]}־: t+,۶ vEr.0ZphC)XdmR;Dؑs70E%X""O W!OÐ bF/ɇmW/5wHs=%tØ]i#}_ ~}sRD HD Hqr9~szgIlDrVwfI@^;^ k[/uWهtTNeMq~)Iz)O>* 4U]/EQFdQ -Q$\,CR_2N ZFQJEE :Tȁ>{wWﲿ=Jm6>HÉ@"$@"$;f߅M_Gu{cH:p_wSy*(`Ge&ݽt]*.{aC,\gȄ-ת/j*{ kQ#ףlX [hc# O=̱ECυ_ģ֧ 6k¿$p87x&{vwS~r)9$@"$*@VnĒ!$4|ێvAliF̮9{lm-oE1dYvd]łe2͡0Z+Kx=hF R*HbAaKNń8k _"l>QF°/`6jC *cT(Wi}OI:MBB0[]t_wO$@"$@"p^"G_M6tf5A;iO]Axɹrq?.~.YvlU_f)OC,cakvwz=Sd((ZhG\=*vn/HD Hӊ@n8pYsSܼn5cm|^u^5/2;2htcO#6TKh8vlicl @yhf%zܽxOw>HDa$l7I K|Sa*~*!ڧ-ڇ3{+_rHω@"$@"$KOv̹&ip1]tx']-ޚl^_t;:vѓMkgYǑ=ߌkDS`[-VqctdĹSÆ)*&3>~;˒xVOmcҼg./N؛nG y)/ɮ_hL쑃~ޱc|S}V9*_DIk:u 3kl'&T+{d/ MZؓʬT zaiOpp{lx}ᅡ%iuyz~R{;~b%VD HD 8gO32'r6 肁GZ%:b池Gmj& i7͆6uCvĸ=15#Ѫ(Уȿn4e"FgN> _xO/&-{!r}Qz}_B%W,*HQ *)h+ :eKڤDÓ#c-,/[}_ :d@"$@"7]䍸IX䂩Vc;ޘhg=N28;|\IzuNqI?d]ԫwn%x=hG%Hש:T8ٕx1Qp`MY0JdM{nUYr#r,H S.ϮntSjjbqnl_n;AXҵȓ6}ͽ+F hD?K0WVaJ}ʐXY]iul/sVl _Iغ 2{ŀ`<3x 'Bтqh\|Zgd?wp3P:OD HD Ni2x68~]iW_{6' ʞ9] F ?} 4/gDaR3#a׭kGGHcL|?}B8gGfQLům(75Me;)!~xT DžkGZIR0 nt?7VL跟xڷf}wccO<>/vH$9HD Hp_ÜYH7AdH&;L%xsx{4Ak?чlWnZgcXQr}7jϝlxo 4 E_ pƠr"m[P͢,VP(qj*}EN>V!o9hw-ޅS?KW_nW]u]yeo]WMO_ewj-ڗx{t$@"$@"6.p~7l.}-2WXt?,>>_MޗPȜm79aqxAn=ÿ?s)5ɡ/K;'Q37ҸxR%Wt8է:<[eDQ_E\\oݖ:UnsKG<+QWd3l뛅?cNH%:~D7sZ]9a}+y*ɬpW\ Eѩzԇp I73NGMSa$j>9Ş AHt1M\)A_P(vO='oSMv5W( ^|mݺ w~x>0IKD HsRqx)'0?]S Yt}vnj}v7b[FXb& ;vtCJWYz y)zya)hC[,DP_&*J_pQB+Ž-*QIU 奃~O֌c^l/yɋk_dWaO8|o CE>tZfKaZ'l9 S Y)9oGWp9VC\_g,IŦgHEwYZzuB_qؠd36e'[!bkfX/鎞u-m¸ha_c?%;4hw<}X\y[z^c[!{rh- !+S #]TBMAy%[|ɂLLϷ"W8?Ǟ*JPQiT`DX[=666WTK^"۴y_Ć=Ǖ跿o+D HD Hfr|2sY$jy#ͷ'}O?\L/݈'wK.p+rZ/+s𘞏g65uj??noD HD Hfr|2s&V3Gfs+!:yp>'w]Q}֪q /E2G*ORWG>}\k8.?@"$@"$@`ii O#~ɖXp8ny lƀFwL)1s0q#'+QsCa3ޱI)ϳdW荅z*:s:O#X+u):wR-hcȐ<:;'!iq^ ߴh|@6 -qXW6\F1/3SsAVf8|! 
mveޡcrZU\µfEc=\Xa PW΅WaKG^YaO]ҫ|<቉uˤN|xدg|rs]+$@"$YY?@"p#q-_sngņL9|S[g!n?g7윱sxڠј_֞9 zJ\ (Y\g[/vȢlVE `>Gcс.]Q0SL) bLᒇY̡JFY4lypTx)EewC~mpM`Ed$@"$@"p&뛄I|8.y~ܯwxۮ;r{s96uϻuyhΦHPs eҽ zzn/3{Z#9d̴qqy5tqCe?ʇ \n*\I.t`4Tp4m:Wi M)~!JKCH#R2mS6 ,AK\'~Kc]kaѿ|I X!.䩭 D!7,"[7cSGO-*oPTh>D==-N\/ER`SN/2N-z:-tQ,$>; ,;N[jy(Hcy?6G.W Z+_y?z}Ps'vg۷o HD HsSwu.sHVLrk?O6#ƀ.Ihgɾ{n=gGZ C}x^?f##'볛vzU),S$;k^l[ /P E3UHhU (Hgq6j}0^ 6JaC bGPLaҙBTH]:4m'Qtl6pv{ݷ\kcp``~?g{mzا2i۱c]t= HD HD X=â&  ~`cW6^Z"(EǸ'٫jhHx{5òsB/ux\-mչKEZϝ1HX:݀X/ `T|ySy;2jL}-v)R]&d?q[{KQv;XoYg-ާCK\m>?p>蹜zm'sFedKpַ~ lnx}wۥ+ww#vM+oo +D HD 8O0'8u琔-sxC7.'Rg{l,GCd=xwvu^:)bq >fC~Y?Xb XEڅl(R JP:{)COC)G<#FP#:k~^ߺ߷`WضɝÝwg+g]ɏڻ.'8HD HD 8 3ׇĺ9AhZ3^dž_[4a" Z)4׹~?EևQLP]A_*t< ihK3?loڄmȅw8O2{\ yoڞ=~ >Г$@"$َ@n8ۯ`Ɵ|̵cᕛڱ_&uxf߾oLil@O_Csw?q*&:KXرy[jld "P:*TA/Q( j0Ӕ[84rM_Ĉ^F_6i ro:v'6X ѐg`믡3?f';&1$$@"$@"$g'mle/c]:_EPݱkju?$k2M[5iq4 |L._8$-^{ww_i!nV{PEq < Īsm}26Zvt{rY rԪeIT4Di,ԻC @_B?UF-c1_bpm.r3oz b'($ }[voo`rPb$4k" !@&G`/l$(XӑTz/FWʅzݧ6a7_Ûo}3Y_g׆Jz۟߳[7s$@"$Y:Վ Ұ~Fy-$w옟XDB?@Gsx}PhKk+'3޿hØmTHX†.>YЇs=>k8h9U/Y`FA%T7 7E=~>"E:0x&uuCUE*-+>?|{v]DOm۶=$@"$@"$g'6`xśеGQIeyW^y/SJ S?%EHrVHSHxQGPOw:̈ #ݣR.̑8h (IRZgG"ǥʖ('Ȃt<9R jߝX4W&eU(o¯h_GvK]9i_ο=$@"$@"p#+'&n#H/{T t0~EF PcBz|A=S-Σýd`}a{-IzR{zFrƃ4dV .=ϿJ#R$X0* hOu̢S%-: :SK?i|/|,ӇӰGy5°kF5N6gϸg}8G-=$@"$@"$94_ְ !E;qg }z!O.e#X$`., G?{v}?G8zԩ %M)fbnn.kC՛iNqF‚;)% dA)xGgFȎI[3cul <{Ȋ ʓG0: Q} ";~ |H>Yjc{jVs^:d_þ@ oC<{j{/4y] [Lģhulgpo׿1mD#@"$@"p Ε+HNNhQ[XoH)@g$q_%^[ gmނ_-xM ŏ_L pќ~1.ܽM{6Zmmd)=Ry^Rrt놅7}:{%O"I AY\7PF :Rsr-=QF$=2n~<( pqx;4&kg\b=y̏6}~4MϺAlbܞ'%5o>|"V4YI"8J,VF$B\؅A */9]2*f"A{0&}vpzcNJ;?G{v[rb!۸qCEN"$@"$g;l"pweHY8q#*%|U|1ikXr/Y{.9L=s]36@T6>o:oďij`e1IΤȹpO> :cY8m{œv l,V,b!1EƏآH₤ aS3%XX)'/U]/B0K][dm]0:.ע[@da{W=J?/1~_UxܦĺD HD HD p =27}zA*EHZ-YsYbd `zC?l)@JHCY. 
/LkW]u}??bW ID Hs_:fsHsHBy*c 3<-bg섘v)_3k O_v޴}̮Xkl9%?ZxaA{^٭ssg#mv9WoV/gC&(8PQqBn O;URT@/}$}$nԧjXEԧX 4D=~;\vncOob5+| /Ҟs7nzu/ɇUp$@"$@"$C ]Gº遝{6#Wjgyr:l VQ0xPit[<7Tk-Wi{/kJNvlÚaxØ;7a#!76*x0}rJ}Џ cw?&l<TbЗ˓M]2!sbbMFUد˟A܄7eKD HD\A 7+W2$KO-TI~ދدbqٸm9wvȠv#>ُE\&ŏ2>*NsY,(<єP P!}r&'f+>`Cc"?7"<ҲKªo?oW%[D HD HD!po;S>ދ )"t;c\Zkc׺6G5#Ԕ`si(F7~F1%yz?aYkmviΉ4^n/.9To-vU :riЊzu}_6f|bAQ4:Ͱ=8gST-T5/~=?nfƕ$@"$F{yC @"?5\ #̐D HD HDL"4hlvbx6=œ@Lȣ<Ӫ-=Ѣ3mwR-\Pԓq\ N 91` m=lsRrHQ3 }qoM\Xc\[xr}P#Zit ݠIЙE,۶U~gO=Vy^"$@"$ܱJD 88W}|׌6A!{Q~rJ{|7~^sFCsZtYG(;.~D|7eKEmS#),ID"w¾>RPxAfEq9mpa E]8ZM&zD HD HD#wmۮpt']BU3/ `I<*pxϫJBZ\lR"Ac:P4Qctv؇Lj ]u rB;PNH:taQP&h.[``uy幰ԏCA ScјhO/ٶHE\$Xp.gfp=uh6gSs"$@"$9D 8U_ǰ]6ۉG`mmMF]gjNID HD Hv 3mۍ !c--jߓ$&FEbnj 渑VpcЭP)"QSaxM9c,9˰?@)3PϽoD/ C>b3q`hq</ܽrҕ(Y* Ke(P vצ)uT$hDt /pޱLc؁hKJUioip>u}B1֮S;l펭 QL$ω@"$@"$@"2H"ʆ\gHki[wO+nω8qs2 r'i̗dSR2nD*VΤP ${/92=m(2=bLz_qX-q{Eg\%X墲o.JEH_AJY <^YS BD qݰѿcMvD HD HNADl@W;f춃 yl߾}먽Q[7$mbXՃɱa{g-b/\Eby#caދ R]:H/PX/'ƢRI/1iHT]e|ms<¶ gc݇TBD HD HD HV#شpng ZgoaI Řǩ!X `' VtDbL%uuEt-KA[-;$H.E^״I> )Ir{;_vݱ`sѻ4,EЕc 5p hr )WƠ(չ0U:͓D5 BDbSd4>cl B"@"$@"^r;%*E$1kmrxްm^eJ6[]@xK/&EX),xA=SYxd\6ž|Uq_\&dIֽ볻xUa,L{pcqH3SPeKD HD HՅVtǦlp=ܲHji0*䱅,[cv69}׋/ lk!2o!_ 9h,SW˙9)[R.l΅8}o# d=DMz.ﹰ.ࡖ-zH5_/Uq^D HD HN=cD`">>W?5EkWl|y6Xm?^'/-Ej(xx!J!^R?o!rrЪ "eU|\+Yg8<.QWYYCf 72ia;gwǮiы)fKD HD HD!pxqvN/!+aΝykcqs!T&:˚et-l i(JB6lè|1j U|9K#L5VЅUb B+aS?tw>ѓ W*:[tZγT$c /i0".?bSD۰n8pĮ߼.D$@"$@"p t$ dȟ3gsͷ{mQjP|$d|w\RBT@gł8x,q [ oԯ,_TKq{(Q%!O?Kq~Ç]oX7O:?вk6 syND HD HD#0ͅ%;zs^suZJ<* (tb[Td_6>2_成OVzF9 0SxxqPYnKh״!-PF ^ ӉG먮3 2~ngrD HD HNϾs"I?@"bvov'6_lm+6مc=Ϸ3 Be?|Ew)BOBFW5 z(DxAe(YTݡf UfqDqݪ>ڿ)oCx=W4?}*x$@"$@"$ؔP̢Xa+Km<_52!v!a|_cQaSI2TZ21m'F?Zt䳄X|zMg|+h7=\ax=sPr>;s2cc_k}WQH:&\/9/5W"xy.\Vf4+KSxeGs|^~i$@"$@"p t$g]&h#}mGFmr,ouކ'4!+/{hWU\` d qj,/$@"$@"$EY~.O6)3e/ӜЂuϘj;"*^%wRFbů#0sDѢ.E#h BZ@VmPF:1] Qtz6Qwhscsab@ܯT`+na;-[[#֌Ȳ*0gٞ` <$@"$@"2r)6 'Fɹ}OuH/Y*Z'_n먍a@#?ī? 
lQ@*ثXO)pHWD"0AnI̒Jү|GkuX?얢Ǿ6M1Ga飶edžC"$@"$@"$.=ɬ85Sʓ>=ZFTA<^GM] ZX*rͱe}8tC"j=]>H2_*YY,zE#|쀶kWo๷7hmy)EJ ̅Iu4e<=U[rabNQ0\Xع^軬2Mu.|?RVFl@"$@"$p@N@"pzx}h"؍˦͏Oog5X:ls[.'퉩IC)6XJ!*74!UZl/lH?.rXp_{h~Ł|WTqv:ڲV;\ukm\ԩˤ-oEQ(["$@"$@"$'lܢ?2dNc1 Gl9 ~V͡wS%)0IU/yXhZ,ST\^A AܠM—ŀD o "ߔGQ]w=Ґ,T2'#ꜹΣ) X墑}0 $=◤\1>g@?ظsE``\87,$@"$i@ 7E"njC&,_7ez [/?$(^X*rB5ܖIt)^pLB]Rxm( fx`aC*Y/v7x'Fvny078'$Y '֎@]͵ilݵ繫DD HD H@nx~xt"2:Bp`>̬==ߝc|za{Qt<\6& - Xk/?Q$Q[Zn@Q"@i@ (XpE1"QTaM(Hp,ۥ!IU\E47;߶EYעVD^pƓ8]~CD HD HD Hm3۵3C]E,(k/ U^XmWȝ3aIN9+4PWeTIFҘ_1.N]iY(.S ӵoyr/{8T3\ _z'rpF*&.l᪶HA=wuIǚfraY#pчI hs#+B g Î }*y:10["$@"$G WN=!HN ew{gz;«^WUGrg@Uŀ`ndо o)n`@髨ާ$#P,&&<(RĂ?uv)@-ʬ*L@VՌ`᱄NJO~P1ib\ThH[}uFD HD HD x,t𿰄A<=9 ߥy<:|+2gʵx'm 9 тu>QSjԗ˪[\_AtYU#0KB}lū:.$/߼L|%@UM݃cy*y~M"%΅{C |vk}ڧ7ݯ"r IHkkIp`>21y.,8HD,aԹ.ߓn?GH@ +rZXФ3{?'G*lkA1wfXOD HD 8SnN^<ѝ3vÞY*H4;kY>yAe k䴗m`Wm^g?Bz SD!Fqbɀ^!݋%e\U)Z,EXA Q Ma*enok 5礫8VlF%@"$@"$@"iϦ[xڿ,/YO*>Ƽ(VɑD:4Ց?)id<@q݈X+_ g+'{9JD HD 8iL9kG.‚WlIO O:o_ [ Q-2("uXpJTzH%k$bH䛅wߋ-}x1'J66bP_:7`O6l@"$@"$@" ,ue_;l և\/s4@Ʀe"zPP9PY-(Id4JʻH%@ mK*d 04ʢB! geűt*! 
= k// [g<-9'f1U,=b}Ȋ$&S*pH/r~e[J /]|u(G0y{̂ oX;dx cuV 3&n 6@"$@"Rr)7'"pE-55gfCںz0Gx?_x?^WV[U@SQ-J$6.v/KxQT>)@UtQR蠊kSO; `a=^@1ܿX = HD HD H G?o;x$ rE,OYO3ABkwn*-4h9X y $ '-x\ם}{z_Q-ox[ṀL!/A$L̒`fL&2$HXI-6eْ(2%}﷿sKݏ\޳U=so" ҅|Mp]e?(z3!3Q%_7ٛ<7#޹MRlup?HrL6εO=x#U( B( B( +9ςxcmlLiJ8Vʙ>氬t(-E@c1aqPF ("K-NQpIV d6zew=r,CqTW`cwk!'Uk{FƯmϭȽ^,L>s[\n;gXB( B(6 ZIB~(txem>0vMoΔ܎]43\'+a)'XpZF)/)&P2It2%63(8e26n8Rkh"pJ[7m` fe,]l (a }v"lhu( B( B(b.L_ZUVڥ:ycUJhn.yr\*lg0Kxsb4W%QUTM;Me0Hݐwbd6 ᶪ8!{[ #nB=a`K{/tca,l^vKLkwqw%P!|&w^kC:TwZdi3Lg%v@,,_vֹ y/ ߸Ա( B( B`SM p[+ /S+gۇmszz̛E`كx{nke|R%&32xsI %@ELcԷ{ئ&z֡a&-())*?Y-l$=>~^@!P@!P݈yI}be]^ɝֱ9l%:D8 P-8F7Q{0)>(W8dHv8&l$ ;qH{'X"~E*Pp4DXsI op #ZÆu^~,0p"8oIݵa* JGvky)ku|Uo ު8/=i ?Z@`@!P@!l&vL..vf'̵i laSm_;H D ޕ*ćS pƀ |*"Erc('\zOa,_D݋<;>D#JڠK*Ml秏] B( B( N6ޮj;S:F1@Dje"AʊF*jǨd\B I~"Nꎫgt _v3'6c(Yʉ̊|ɼhևr)_xP_vCC@D>t R(!X8K˒uſ!> LK}}LO?tonSx.`{jWەՕ.HTuxSD 7h/`@L )9 dRL 3a9ar$~&*(?}[4E =2cxߓL]L ױcLg+I ΂_<}ER@!P@!Pw:kc*'U5:7B,\I+fI8ئE1#|?- ` kўއ$5mR45Ft3seeC ؂} uY,Gvv`Pjs;fO`o#2(;HHokZSWW=bub`/rb!OxcHsr,YZڳ_~ Ǟ5.ኋ 7RWO^hOͨR@!P@!)Mo_Zi酶71(|x'ZS4k1%+OSнw#G"@y &+S@3NhDA|i܆@&>vs]$-% Օu|GrHIn_^MubH}yiթ }ۥRB( B( B38Xg_?SÓ+#F) ,p&)Cդ@()'mӶ"$I *f0f!b2?3|S%'eM# Km! hAԻsG# \Wolke7oSiKr rXچ/ ;feBSX@X"cW3Xca?m?'.XgO9.w4CeqS?=_`sGZ`HX@!Pf Pnj, !o1i[}pl(g6z(.vO]~\v~~#_~ ~ >FLt̨KHnP&,, H]j>N3SLPx 'd졵?iW-l㢔.Sw*cjR@!P@!P܁hSV_nkc0@Ȁ!uM"`B!0ADBl42**#_oӓz a hF("ArNC0ؔ_W$ 9]<YF oR79d@]= =uѮչXr9:n܂qF-~0L Y}C\xwمYRVbNFTrU B( B(6Z)BE`ܓgo`ˋm [2~?v\m۔CͻwgxgsCG?}|N&T`VEljLhHkh`(FjI(x' Nj3 uW;>@AR/o+yO?G{@!P@!Pm'O*2XkgJ{@A\A߾K2>BlDJ5$1CVk}T>knKu1Jz ZݹM~vȧ:y%hXK6|bAҵim wdO ,5-gzcb*HI-mRb?/zXI:ğmN*?9'2n" .^o#Q@!P@!p f/nV( E,Ʌ 8󾤭[9=>r`6]/cOƸmO~C20l '891yNf0o<0t1KɖHq[C]^ԖWS|O%ywJ{ eτn B( B( BvEcc7)l;i4K{O2H3zt(SਸD:ԷTRu #5%(gW (چ;GQ. 
aDār$JiiUx]F4.xhokۦ_Hlmg&޸nwW})H8|ڎyI'Rֱ7C`zܧ?DNG;wOm~ )lw}a}MNYK m%[B( B( @-xBC, ٥U~ ĞVc&>}m%l%PA#)Ȣ>ۙp :Q2㬜S8c"˦};tM ;dSmCp/I:~o;~N^@!P@!P?u뿊e|Hıj8a D,`G1C~ݺ֠5t6O:k푨&h,Gq9ɓAB>'.(=ҙF@!P@!P\jųw '_~BڽmS[BOL9&qn—?.iE[N0DSJP'8v&/MBf&,Z&>(-H<0&VD矤vr|eٹ1޹Z'm'b!hS/W( B( Bito/b&O(P bBWIYO x>%(g;b Ve$#"N2eJgp^ꆍE4 Gg+ԳѳAL>m@ T$Dٱ5ʯ>zg/ߑi yY#*KJ .o呷5as,Q\ {jrmGA}nt~)H(B/O^lWWV;mu@!P@!Py; WVgʊgqhKnzޤ񝘩V*O~-s(L<9م4@d52 2<KdIK1DBQRDԀ dig%2qO}@{NWWvhN} 9q-3}ks!P@!P@!P0v9)K #Rg:QUq0 nl;pTc% M3W͸#kѶ&C'Hn1Ȩ%EC {d $?+~ր휠grRz\sһk˦$#S!3KmxEmk<;z~`|]XX9Lz/(G:HeOuq5?.\\l+\YSxj_h;uȯ|=i΅@!P@!P7 Zp,3_]\i=v=s~-l؞QLw:mA†ojeϖ~b(9lL*0D{uʉKd v:zN[#)P")qICk~db7(Q>R_wU6$-> Hױ?|{wP B( B( BCJN++&ױSOF(FM*P`z, ~ǀZNT9ߙ7s&, 'i<ɋ oI6lCi0іaY`L:Cbw"tJka +Nq&ܣp6V:~~0w}^__uQ,Zv ,LKvry׎ٮN_3 !V팎*@!P@!P\n{}e(nk_>&:{ږɱ[ڏ3Qr!0,o޵G`J8^:3 ]"Al'..C}&N%.Dı\Ɨ u@Xs^ _:ƽ4@!P@!PljFwԢ?~uEK[ih,}E"{/|&2'"IR.D}e=ó,D6\_]]og/pSOM`~&=,L IC} Ɓ,Oui- zU/ B( B( w?'W1֎J ~I ]QIF$ DH NSO* Ql[b HQl@.A%`t-QgSRFCXr(@2+'6 Yʼwd3+F'~+U;/>|/=ne\Z䐟qrh .ϔ eL뭝9wukǴ#oCK߿qٮ!Jq&K}U( B( Bf!P ne ;~vfi5_kG'c =|wїp3JڗGPB@t '2A>Z|T"bіBپm9!asdٹm'dKZEfjcF؞?w=sKs!P@!P@!PpBr+%m8vvt E SzفnN+n[6u,?~ZH3+t>=(%\EF1 T"ys]A91OT”N Ӆ'{(AK6ڦug6bV;O6A-]~;_n[]#\ku%ڐSQ0ќJ KnP-Ov@&S޿z'P5uF#b֞gj@"YB( B(nXV k&/{䋫=;8޽v{'ڶAb'?y]b"1:HJ0)A9 V:4BH1tɏLV>:xvNx{$lAbg|! 
.D 짏 R B( B( 59ύW0ϖ)syTcώ ,JNRqj=W $($4 [ ,:v $a11?W\*DY,H^6OݠitP&h9 1>&Oj<=ݳD#8xZ'!,FzF R[2T>)FbaBY!dLhd' /C'K_ ',=Hc,}C;P;bۦG`Tv(0[ivavc,+r)<@IDATk=a|}-iX6oa.۴THvr=7eZ B( B(nZpBC+Ǯ[nxoO?0>휾= ߼snO=vT 2)'p 0ɠd Q} % p A| R۽ 'm&9o+C{v~q`J!P@!P@!Pl*_NukIO9p^3!."W*:3`1qi#b 1 J#'#X7eetWCe#YӮon+CWFtw|XzQUv<Z P@!PMAH!p"@3Kǯ%^[{fo;&ݺP={mxObE m[m_A?L0Y'yTʇթ(2i aR!yvJ֠ҁZK= wlh"CSjvۺ7§55RcgjC!P@!P@!ps|[oKxL#';$g]jl)HzE/e@&s  dh-.Џ!ϽՅ3>J܂pA@} )&.itWn޾_xPGLLF!ƌs1('{g91hI Inxԟl[p77iJtO.ɔ M}1!+g vӛ P@!P;J5^ B͗)ѼZ;u}-sn#''\7`To{̴3WdEG9Ng"n;e Ih1[v9 pvjHA㝎UУ_>>GXB B( B( BF2װZ(+pq9hqꙅtlʫqǴFnSzH\0ƈ؁:IzNuL 4OvH]]D^mhaTO1 emr@w}T,'N%Fuv1ToZڬ*LΚ~bgwۿ|1*pꮇ7~->5+ycƊ }ԗlN}~ |Yvnbx'6LR,LxБ=*ca\lWE_;q}dֹ( B( B7^"Wᄇ~Kn&vLak 의hnǍG _~pO=2` 19!8K-RlwV"/,.7QF ى6;Ï,3J?JBI>>_vz/O]hJ!P@!P@!Py1api]Dsx]8vϒL24eN}ZV3Leb惪DBUOޞ&[9 Msa[Ams̷$ɆdLE\d@dO(ɧ/'McȒϒv(oYn)u;?o"6SHb^j{KI<K_]r8B;TbpEe,LmH,LI Gߐlǎֱ#Zչ( B( DfoR/nNϿ|o/UO!3-^$nW?nm;2t(#GU$`RkH8(!1H"XPfY qp;3l`M(@ tJA= iN63!XlKgjAR@!P@!Pi0Oc;oИ~N"s0eSrLe2OSl{O '֩zrǿ+ -Rpuȶ(d? @0`h[-ΠN{,pO4tmksߖi|ѝ[x#nӃپv0#\9aR]9 :am@Q?.=S?3wc8&9;=Ѧ&Q~j}~\ݝ=}J}hwQ( B( BF7`0/\Ym;~=uf 0[ڏi?׶M9<{Yo"s⍇;ܳ U޵};Hub|LPR[ )D<7o L ψx,^ڤJd;hr34<uJYB( B( Bxc9/qc5?jQ{ ?wxC8mWwHsBt0^,W| U{?ep6(Z +V\ )$QhkQ\-X(5Ptb*pQזVٯ6;noww?̱su+u:<'q*kq/[ ;C\ Y Ll7(NP_!A?OA70{B?vj{ۼ?:PB( B( @-x J!p+ +ڹ%$:8ֶ/g϶g m}BP}hryaЧ}%?jpm&DL:)wp~$"pf! (*+p)A؏'<@YLM:^9mE?wJ{b??\@!P@!P@Nax`6f5(Bq'9yWJRRgOB+,}kۊxN+?7۹ܻB(IwpxJbGx/qx˻ɝ.&7~I(3RO\}ѢN }~l;`- U B( BQQ-á/Ma/||ѹiR{0?<1ûۻvmm=s9PPKo6H. 
aJh(DG0le.+xoSAafR#)Nf8aZvW%0p H# iU\n!~ @$~s Z}к7X:Z\"}¿ա( B( B#p#uDJXFlGo@c[ݿu}[եCc?ʹ}+cG7ԥL"dCex() Q;_.~j3wR wdE,87{P?wCeYlu} -Z3 As!P@!P@!e 1Ƅ:{cd1\Eʁxn4.uH);[f sAHj1ȓް6MaOyBKS׵(bhقI PMIJ_le ٵv/BEWk|嫻ҩ$.\:AbTYogCw[/]'@AX u0-fS_y=$H>c#"Qb VTE: Юb;볇ĩJ!P@!Po'D|o+؎ 'oskmhT8&W@&!myPveq?b' (`arv[|R6/?~kH!S LDe!O2lHZu&;3-DSM܄~ʗ zĆd%q%%OȨ< >thu( B( B(J0eNac=BuPטޢ̜=硬Gq~%40gޚvV\xZ1c ˋh:W_gCoIXlptM]i\J?K)s_#1_Zaз-61?k?}{ے덅};a8u aS7G ©oCjS򡚷PgB}ONtgZ@$@!P@!o,n:ǰo_h|z\mSfOm MމkO?vT 22 `B!eÄ"Dmڀ*PrBv0l`Cn5"<*TMSHE?ʷ~kOt:@!P@!P]`yanI>"~l99(#oLcUu %MFr@5< I0G7='/]5GaNO*ڤy0|a#׈P福niWŹ&"Cb>eHNL:Dc ^pF٠Q9o&M?ztOo|{xfqI¼/"X:}=[Sq&;{XX=_Gw.q!靟īR@!P@!pcï w.ھrnbEuO;~xlo ηyޣw~3i4@0x`;",o98!RL1z+x.a %=HXՉ(pq*60~Щn674÷@!P@!Pw/j9?qem. @ v!0QƖTڱ^$0*1ЗcVW Կ?N:3 &d3Awe+$ ]jԪ@!P@!P9p6+(w?,CgUqX?F頣"`y(9fl*dg8Z<]5RBM[6~:#>wd8udn|TchMBt{nQ Bh My{։7/uz}_*hnPGbImYQ?7Ԩ'"x?gd:@JWdm#=:)WwM=xz+, B( B^j"Ur "'_^hW2_Y[kj?ݻ\~qNNIEw];6Wd`E)1o7ќND11D&F, Gڗd 6Y_uw@d"1<|U B( B( ;ʼnsMaɠݤ3G3kK'?ʣP`1¿j'Avۧ/ mݻ$S!P@!Po Z`+BᄇնHflmx7HI841Ѧ W+iKϷe$#XL3LV0I+.Nj8A tIUqAgeDI;h>۩>9 T;mكLl3ť oT) B( B(nk0+8$&Oa@̑p5HuSD1j&%j򤧩p;\kͰLi17DԹ%Q-IT&\P4MJ߽~N{vMçڈlnCE t(dҎUHMTU#a<>I:PovG?U?#ٳС=+^𾱂fX8D_[_s7| "eggx@Q\űScg|[B( B( Gf,'o_[ X\Z[mxcm R 8492B2-ᄃ/++`hwI e!RbJVF&poHCۣ JdWC #'*cĉƠF_‚:< 4EgIzW ,XB( B( BvB/WqÛ $Q#XtP7NF`Ÿ`vT1 GL'X=MUڷ 3rAAt`vҮ{8XpehCv.p I4B,+6t0ӋPDUȋOg,v(>/Ѝ!% `~;caJl'q{&̧, 礹?K/"tZvSNLB_ka OaD- 0U B( Bx7YiϽ|=yz-cELk?}`K}3mJQ(]>#_?}u|P@;'`hΕ8gydpG:+|A!mJ@F[~BX = d Hɱ޵)5)1Q zTV@!P@!p '>-b=h1d?i~p=kځkjnF]ٻQxk}۳.v 3NJ80LB9>_YF1?8 s21A{6JgBIJ`_.vHg҃2ڐ@:u( B( B(nQ '/-E*[hlr|ڑM%vJwX@!P@!P׍@-nJ>_>>/eNcvLam'v-jE"{b*=qwu%7@>@ \g2a8SJ($)\U,,{ a0D > whٯNڅŕW.GJ!P@!P@!pK"+.]ږ0ZCkIƫX:+58yA$n^.d 'QY|̖H!+$mec;'C =NZ).p?l)8 礪Rw B,E=}k pyQm&%Oe-3IB:uE ϵ=sҚR=lN[b`'Q$gfw,tJMeOJ@@!P@!p0nRoœڹ|bƅvum=sWh}jMbz7_۶M;8eg}%&@#lH6l. 
*J_L`!cq X!+ݨo5P 6X>=''ȶޞ~鬪u( B( B(n)` ~cLk71~jāhpf0::e m5FsR4%= hsb^tK/Q'JO#=y4B;>ˊɓHg-YO<}=|5;OecQg$ 6&caujw:`?Z^ҳFG&e<Ҿf (K{}Qiih=끶h#$blҩyxt &zK6*AqP fl/gu@6=6\Go}/yi O!= Jo:g  B$ iT|o w!I+vp[B( B( 7BFoP  pnyWN,KXѳڇ0?7`J!p}!=49u}%%>h_\&"Ԝ MIQz/uХNl<? JHР "X,!Ԉ/GQ곒%0}!3+KmR@!P@!P#p+}Ļᒝ:)'$P_3#$k o+H?3 o)6R >Ĕ^ɊT 8wI sE2ԧӵAc!P@!Pۋ'{ۄ_Ƥ'[&J?8蕅{.H"YqHqSB1Rdgu?JunLl^κP2:L2r=齍1 M+A7t˸{Q'=dZ7' '}N)>Mnu<~U?ƍX> NXwmVW}eoxFGGu'FX:n'R B( B~jǪ$">{|}R[ěKk[;k](KBzo7]a['p@1IOuL,*LnV;(xAPȳ9ezVI%Eم t1 [M3cB( B( BmA`Vλ]ZjmֶB@H[ZpD4EbryD:8nfiT0ߪ{Q)N~p89xۢ.i,f^{A+-՞8IΨ$ d#:ώYܢ5R}Ա%2(LȨI9B*1-Qt;%WmǷt\?.,.5^@cQ⏢SህL\ Yݎx.ۧ'8*ـXnoTؐ5-EGWlx3/k y*@!P@!Pׇ@->J.A˘ھqaYˈ8޿c}| oIGMU[vͷbQ&/2S Z΄sL 0 @MN7P4ɉqjy$tHY ʞ]®|~vj-UЯ*@!P@!P!p 3poڎrQ@RI ʒ%vg@99w0H?+rl?n& G?YO'Zcap26mb3|j }fYw/zZ߶́!CMМ7QFmWFFgm?{][eoKn |r(&0֌o}”M泱ֆ*v#AOۏGmYs ˫i Z3,ûS B( B(Z:GA^jϷ.+kS<'VwcW8(wS=޹?Q_r]L  9!!gqɬM4⛇"*(h+mH0av$ zC}I*@!P@!PMG:߳÷Ǿί60ճ栰1 a7;ΔuMCNɠ,뜒Ҁ̶6DC҄g?<@d%Ht-Z_XH YHeX؏wMh{w];*cYEK~ 7ӡZ?J˙gLi5M"tt'P6h7tT6u|_Aԟzxu( B( B>é@Ʌ+ +ms7?>r`mrz^z]ۈÓmf$~;p>џ|]]^V򀗷.ba|w/8ݡBJPvgf2vU~Z?lP)U B( B(ZpX%$8~;[Zd߼w}螙6Sk!w *7ӓ8~!t@7qJT'yԇ8wa }99a;ޠ@>Jm]XOqvN>eA$}?ҙ_XB( B( B-"''<{j4E;{$A-ƶ**0(]>m̢{tҭ}q;1:cPK{g-O4[ eS.3ɹ'>zN{iW FpMծ%Wܒlc#U۠B'd@FZ;8qO;4#UG{Eς`㏧ bIz1+o 7l><2|Fed Kcgˋy||J!P@!P#P C8>|>ىɃ[i![#p/&1\"d6 ; RA"XBbYƉ ֝ȐIxNRt iC֥mE9,uʼtJR@!P@!Pi.bPvoνڞc@M"d1M>ِ+ǒ4) f7LY4Yb:y4n M(q?ڷYR(<@oJkmNs߼ˆoJSh428I]shi[:xh+vH\3<6&^iEm} *bM0e\:># ӎRn߽p.C?[g^Pz1j؎ bg%%k2D5j"?O^P T( B( A8ź3W۟^jlqmݿu}SwEUrp[Mo}y7?rdO{(@=yfXNfJ;*VD򁉎#i9d) gADm:9%9?La@cr3CتR@!P@!P1r]]lmvc Ni46 7RohzbGE65d,Yפ|>P0A#]/OAgI M> мQ9ieG kuFP7jYM{[fôUwE\cXslc q1o' .jJM@ /oKCfud,z|p Θ{S>}R`Ofh]?kb׉Ѣ:czmB( B(ZFEWg1g8V--pAA!q$%hmeOeyP 6=TKL擙`=310)t@}($>j-U B( B(^NaE6vy5GNr|tu(K~)@,I6)lLv4Fz\2ͬA 8K+я \'=x^۶n!;LI|tLfZ0kF>e^/ CyRal7'yo[ ];1T$~?gە%.'ba3&e] e<5|TpW R& !(3(|@eziSjѢԵ0olpE|s> X( B( BS _>D.@IDATg/.k&+~pLwawB`؏7w?=N)ȄR@B#>;InjdLV)rK"PS%|; ҁ46(-C?tȫ?ʼn ,lP B( B( !KPb_[O]hmK͞JnL?9e $aCNWM!cEYÁǴz:NȏR~atv@_/@ rR:bSҔLI|]Ԡzzy ی` gzUuc; D|{3(!*cڻm7Ɓ?{ߓi^ 
^JN1/iNƅnMm"K?[Qe?PF`U(| ĻI@!P@!P@-xmls!N-j]XȁaN:.z"jmsLcߒai:3W@:RЛ3@fhҔA"l;AԠ#Zk)-{'orXXۧ8نjBmA`So =ɓi++NjN.0!i3$ OPbiFK/>`zP"$vqA"Bz6\42L[+uX3%/ ĤG-du( B( B8A"間KuAz)@- qַI$Ƭ9p,ux&Bg6FɾcQ$$d-g~S~ &ІP?&Ac[WH7)|mS-M^f5{ݴ?jډp/쩦fl(po[jGnko: {gswL@LHK>hA<rsC, T9½Nʇ6;3MmR >v&dW) B( Bx jkS[s_~v~d=ӵ־}w\vO}Uy{9;~o}R5%4K`9 Dd'Dܾqb{h߶@!P@!Pl@gM-;*c9SReٖdM%Pt^լ.zuU@QkM (*(`cf0h@ Pڲ.M97F#`X Lcv(n x8),c.ܦrLsぜrdB L<-hX$6%KjWz(k9 X!MȨʊ)u}M{Nc^,sOl7|h=m)i12B&|ZjSm|({/ fL CLV3'm{!Wqt㊭墍kc{Zf—w/'n6@ :7ӗ yn_8̾d.ot#`0"Yx!僳w8Rjr}s͓X*}lS۽Vkn(T>ĞH0Q ,*`QZD'$Bҫ W@eU>T}l#QL t4cˏ>t}Uհu##`0grkeP)sN:JYE#Iĩ+y.~GLf僞,B 5qAF(Iv@ʴ<6D~F5Iw=-O&ED?٩zi$ L\2ԥMC/T,UEQS+-_ nK)ݶAK Ijv⺺La^oA9e~kv%>EP^ha|<: *,egʜ612ja=_) h7:  -z_ ~m?dtفW} ͺl0F#07 깅gΔyp#ez~Wm]U>U[պVݛF/3_o46Cj J^G J B0p\'r ~: }OE;}%+W$ŐylW_/F#`0h>{V-,3lVsDMDs /d"ů݂>5Xk DUH5絹}Z<,Oa5FO 3%1J9-C_2_Y~ -v6VdoZ?ti?W 7;=^-dMR4XcqAo/9\ⶄg}_Wa%eX)Ø_qI_*qB:.m2BB4,*s8|Ol*c u>/ dzqMѯM~ĥerdke#`0Fohp@=GGf|ݶ+sntֽXl-;9ݿu_.D;k:6́<^#, d4%0μק|<<`x0F#`2@(aZR.~ ltHjN&2< _+9|1ҺEs 2erBV9?FkԩgH" ,ӆrÅ):.mģ ʱ$A\e[>v a9"s3@tV9<:(Ф˼a g1ᄶm'VAtfL, @PxϢ\f#!4z'HG0#haW`6jbN9fŮYH\iL}gqn"Q]LbV_ʅܐ0'<Bו/9i!l؝0 6`?p:c4N^)^)I?;EzAV{=>d0F#oX)gǞڥjZ K,d"dhZ y]^u5N ~oB}$F kbD0I?:JVvA!ӫw?N.L %̙WKWN\-yjoqǦ#`0F`oÕ3Ç<~|rݎrњ3a^^Ap5wpib2d`A9E-Qa(>L)B+3Av'pBO @;vic?NM}ڭ:50F#`axO~N,e+x(SNrj!Q`HL !)'a~Rj)\A{CN/-JB>Vۙ&%V"|GYwCvR&QcS2J H{T @'Zzmil #UZِ)bJJ'C{!B*R67! \{^p嶲}TyrI~"q6)\#"F SMc}a1=ڡϜ#:;plٮ U=8OF#`Dp4"p?7k{rlfTOoHiM@'YRvrq= dYN)e;맓y.RVr 9dLr5ﬤډh̏B̡I\Pυn!RtjK`w৪l^(FVGOd\w8e*#Q_ j_ V%:= JdmIG3XX{RnQNJF%z/OuO/~3 q1;okX`L`,_)i!oc̆Lh'ti3!R]B_jSҥ܃S+d0F#07 ES.G/kG뷭j{GԶlkFcx-K˿=SIitO$?=sx! |LGEPCJԗvCD "!1^TQlRA@:1GvxT|1F#`8FEt(V8[tP;-\OR'&YyG`*'5r'hi]9 QMzAoDbJL){-cāJ Fƅ\gҥHաb?Av#T٬&W ':L2:@(SǃZIUHY@2H-XvHJ~ʴy'i#R9 i>?:_{I=_*Gɝe,44"Wl|1(# O XT >ݟ[H|nAU~?Pyc_O87F#`@" S|878X98#Uc* }3'lyp"O3w/݇`D(!`#r(?d(ˠD2S1 Yh]P|ٮve\U T!CC!ЕhҮC5UÙ0F#`YGਖ਼R? X(ebZ y9ݓP?&=i)4OM!/iSyKesAI Lg^먪,lT2zU" Ār BΟeI.p}k;*ODiy%e e ;-={e> Q|o<}a%Ի F6Ŏ~E* 6[k٧}n"ZdZ>lC_x7'#`0F ! 
Czb9V>l ]q W0;1Tu6؄^?П6z!@w݂8o'@d!M.1e[e$',j4mS"6@>rִĻ #`0gl~/_<L)e+7| \qX9;L~R?sʪ aW'1iENPb;c5-Kʐr.g;; r eMt+7-SB$6dd$d)N&% sH6I /9~R~5S-.6s&@,YYnxH^JKԾh{bHI$wwƋ+ u&bC@܏|auI/_8^QCX?Ю9ٞ!J 0TR<ɯe>QJ.[2JulWFI%Gt O+۽?K'ۭo?9 O!E$747m:[j~-]l6!f$Ca3so o/F#`07x$-׻Yjlܶmܾ}u<]'*[up /#HU0p30cZ3FF,"E 1@}S5VQ-2<)(,:!_!`G:H6C8y3O-7ؔb΍0F#`NO.xJ BNt2/UNQM_sնmTyN1uc+,u丕~9HEs >__8\)Bm)'bؓIdmFPVW(.oH#`0F".}{go_mP>EqEd`h.h@=FDB: *" v)4E\Ojl'@dCt䴑|Jލ7 '#`0Ff쇏եGoz .٘8s5Li%LTkҲwUc\1  X|bYC_iTMdҤȱODpg[SҙjR}XB9;8dGX96z '@lI) 5_ @V\-yѰYɲrH<ۮ|/ǯ[_~.s4<ypGA sxH h`8}aҲemO/|F[mRRUfJ<@U_0F#`*{(<'tFt[xC8UcS V λE`+`C7^^}Zq@ĠNԧ)Q.QiM2@r /D<bPrCcO T٦_y|^ܫd0F#pL)[),a#y_R&?l$v= 5Nc}&O4f%p:uS0/-_4VsMH[!sEhRφP!}Ύhmyw=2r5h57׵K:%"eFc^ypiO~I뵚65SZksʄlzO/S/M`%M"EN"-PRWB4DMM,;(P}UUkzc.yOT̔ѽW[,+H9dd" e%ZedʒFEٞO3KYC'9u/,L&-MSbʀswx! Ϋ-.S/&-IFxjC6r.㳞LO,OZO)R &˛4nnD:Fq9,p鋦b` %}X[h-uC?tV #:/F#`9̩Oe?Cf$=df’DE`blO>_RH3"0@I<ۯF`!Y\`P"} ֪()ǧ"hQb=dL`-o釔ޑV#:P~tWy5 0F#`^r/ǹ4uG=Qʖq2d]9d} ac-G]TOR61U_VpJQCalC!9 ۩*d02i({aUOr7ŧ *E I` yi,qIm!W_6[c RT1eQCˎ\@fy^se>U4u#S]G IEIwJ*Q}a8cqfB= @T miG{Lk/#`0FRQ0._ᘯ[]ۧ|$P,Y.^'{~/zgjPEW0O=0Ashf0\@ɠ\.GЄ`^K[VvcZzme{d0FBKu@)[/e38 U-&`j *6X4ʴr],QXnpjeN%YLr6]IL#2RwJdFCAnbP xUXj|I!=IQ)se8#`0FONR-e-6䴬 -yYVJnŤ/)Zi,3%喖-?e8i?j~rVRS. ,{JԄB.<_;,3`@Um;ťVe)R 6rS & vB\2A6@*پ|I!'y"^ ~rU4Z,yDb៼hx\Y֖]_|B:1d1ƱBH pǥS#`0+!07w^ܾmqm#=/O z(HP<@,O<+N? &T 4dA>QY&-k rd%w)e[~}/hoN%>"Gq#78#`0F@@)?6t'mncD m0/Ϫ ~ĪFRHF3a[打&lieՏF>_Sk)mQQ>ehZ[PJM>|3 ')q:7b~<!F3mƋk7d[ٱaM7{_??["!? CyU{D94 sSگ6S3K~oaT2I+h3EyxWo%#`0Fp`ow\7K 8OU.pOwl*l  Dav6 ( }"tCpHzl`cVμ|Bz'֔yw5NF#`0/N;,sӮȳ)P皚HVes JR[*XO>:?I@=&$ ҃٪UeK@곭ZdHcBuDu,!*IC֥ZO[iOndsq˝ $:sVf֓OolLCmH?WzPYw!xTY>?*TyabCAՅuhsgm-+hi!0/ A@avF pLֿwZͤ/SY ۲h/ڧJmû~Z_8!fi/[`&RNF#`һ2F@0UӗIemԸ IˀEĠP@C,VBDlK; Y`Gȅz6`a;dAAE02#`0υMuU'cQqE`K:Z7٫M,f'ixIS^d5<쐜̳ ]9*ʔb!+.,њwIZ-e+]: vtumPʑ@;vX[9?iUƽv'E|7h_f#z@q=s֗[ʪ`;{^3#qa\IO@/f- k@FFYkG4ЧdgNN ,ңֵO1x0F#`'# ydLdSpc-N7rUf{F"4B: Yь80l|1h_f3 41'2ONm´۷2 -I۠?4὇z.#`0F<7SS\x!&d('>'?s󓴁y(f$lU}?UH\^;HdI,VdZʵ4G ::Ň. 
>(_eiN"2iH u맭g?VFU?̳ \K_z,GxCe,/Nk(e3'ʚUN+MS-oRs3HF@Ja<eB~ڄv=eC>4y ^XyWq#`0+/[Ŭ<-^L4#p"~tTcG9q"!C`b!Cw6t L^mm1 FOat>L8#`0F#p /yI) GD/-܉W&lU[n,~S51!e5BEud(+UIt ôNHlٝK.kL[~SSm7򺏔:{ZM<{Nu=q'+=|7(Q eɃ(OBY+Cr|dMY[n?ƱR /p`A_lEӇGK9i֙u꧅ߜCfΙ P?驣vqY<Ϝ4&aJzg?3|ꓞɼwecu2`}.\ȉtp9.i;}΢ yo,W!tE]ξM{nm:U6/2ȴv)GWXو#_ J~Zwy"p 妋} Ygin1 B7iZѬ$oS wF4ޣ8 ö>}elY?wș0F#Blh~/䶹 `5f٫4X_zᘳ)}8l/Fik_|us52P/dCIPĂ!"EjOB8@*<~4doʧxqos#`0/Vlpznf P|ĶIZz;CZR/oe9EdJZLcrb>` m>]_`C}ȎEvעBy)V!$}e_+9ZڔhTmլ#e)ɭ`SYht2FmOӣH_>X^&ʭ(9'}@}7]^91aV!ioJy|jǦWsIa9w`Wo}j|Gw7\r2F#`E^O͍b l-Ʌc@9 O怡pȩ鐭X+6ԓ+V,gE;oO.\v A vTu2F#`8pn~xv{Z <99q2[7)4i^KtXAJ6m$?bxyT#)*S6r:IJ]'<^W%-*>H<&VuFied!.2ʉP={J\ZիO")T[b˱c՗/F`7\\ymsxw=VuYX ~ æ4th>SIOLh_]8~Hw@ [܅x@"#`XJ}KY4p8ŋx{3V~ttBFŗnb-X00AXctU#O@”u b*i-fƀLԏЀdz5Ӻs#`0Fsa n٦ޞRzS?% ʰl+7 ce IJc9ze^ŋ}r"UdY 82KyP ) ~钞H#'dɠ3e%:.(ΗgE`rO߽>V}0yD(/PV%|O<ɦ/r|aQ):J{?`o@IDAT,.G;Gu›VO۪egF#`Xxޗ]y2Xyj@Zx:wI܁;y&D#OaL46<[*?!ip,!Xw e횉3PB V ;3*3 5ڰ, &FǓ@O|k:m0F#`sb`n$Gdrzȼ&=E92XsFV7˙9g͜|vx'}D҆6lCCIlׯaZG~:ՎצC~gtPgL2i_ȭ _\)ΌGT7`݆ v:_8)1UK_809,d׿[/v[{=pGvp9yt2F#`XBfej>,7`#>Y&M.exr>".$ ݖSN&Yfe }R+{Q|5F#`Xh6di`HX&O8ihKf5Uf=7h2s^.u:r+rN#Wj.3DѮO~p|#'z֫~Po{:)C" %%>JV-(pu%83'^#1ƍSeӖ)Ls(B<|T–V{I98lp bso0F#"p7pC_1ЕA |ӹ(ce_dNOП9<(`+U+XV"_H^V$X؏`GyѓX#yԾgA#`0Fl1`^`g5<99p$c3aCsB9Qd)陓$pnKZFV9fKjիlRRy"gm'yNFuhc+w06:/TFb>Gf!pԆiǏ >8P ;<~Iᶨ< cuGٰ'#`0F`! +;?O7nhp^9&]W^)kF/T,JbRDo;NX5rD"/gBA! &̖HiS?G(#Xq=L8@X97F#`xxB7hCOzee61G h9fʱLWNFm-2t4G> 77lgO:IⓘtPO1zx/V#p*UPfg18^cnܲlē&.g;|>/ҿWr۩ʶ0F#d%U"݆x:AOft/>ǑGʘѥsu 0>T)Sk&m`أM 6wHBRPhb*y+ބ~_gz?uו_~ 5#`0F#p2N ҎTn S;]\ȚPz)i[+B.+OYSi)ύ Gɇ8e;y,v*zӾćlKnEΣAO}2)d{ᗦ*!֍b,/̧iGXkOLQ^~ /oE]6F#`AVW=[pC@&XBfGG_6cnw}}P ;q2*ł"bfi 22ւfHa~H)^zQ׽4#`0F3<<]m`O&yu[՚ g_@1b{[&?EYJR5ЏE(Ob퐯vX2Ե| m^/d8ӆGݛ~O?úF6n*L[)C_A/<þ,L'$|a}RV˽/L¦IKF#`Xx}Knc<)npC΍!KNknp! 
?YgGLɩd7Z 50Td$ØG.'!bhFT[^D]]>s#`0F#pn!p xj@==rO6 x̓cnSiQ&Io:}r*(Cs664N/9IO] Sй8[˶nΌ0F#r]N taTn  D98H,~}ӹ[ m-#e"wu #Ly|"dܺc}Yz ˙Ksu ȳ;ɬm-StTעL2[yo'r{;xr짖L0rKw4_kRf~o"΍0F#bUFW2 7p3meHäґT=}(#rj@t2gN^3R>}x`h ] axR# l621Kz+j0Q ^ xuOR^,#`0F1xd<7 Ʋr֫P&B?N9TYGQwtcqu2g˿·x& O'B>_8}[i7CBVڣ<#`0+ oXi߸tɍ\vsXk6-EG׊lic-{wo[WuL]f8Moa-y== >pR@vOzm'Ƌ+ߙ0F#`0/ z.4צZu<\yyqXJ?E5&[ry.9ӪDg{z OIh5mZiѯ &d0FVҷ{5'ݪ|ܟ\C>OdV7gWo+z!C {vƗ C UQ"+$Q)zOsOl{_]m*CѣS'߄|с;/KaΕ]  DugrgO MZ-揕{%/F#`XIxJ}F#xK" ;ЅN@$1^s <\:?"P y#^o'txFOZT U$쩞:=ofnnu2F#`0F#l(*DdGy| ,+܁huGO;5 leL><S~O=(d0F#`0υշ\9{v,c2Ef9?L_jTl+[f.}^ w\\wt|}KF#`Xx }F#pr\nUwV'M<`tOt0tJĐP`7DDq uP$C:HNHyhy`ׁuf0F#`0FZ.ܾQ*(b|-Ci%ӏ/< atICOӯ|a RЫi |7 5_0F7wj0'^|A2sXýy c}!Ib#tC!)铬6qVݝ.mGlmQ'2Le%0pn0F#`0 &ʯ|zjNhًfgLKQ_0Q9RI}}a2$H_X G_Xuo93F#`V bjߨ0F,?/+^{:8SFgx!>Q0ԓ#B V~U >iҩrj~xWj0F#`0Fy"pㅛʿz-edlϕo!i)~o!}hlB' P"!Uԗ/|esdfHU#`0F,_`~3#`80߾ {H.6lBr M2u+\@Eԑ"{RzRryRiY'ӳTq2F#`0F#yq[r(v-}L_8Wx ttGyVWPf #`0F`e +{]#`)B`˚219^fg `8bAa<♄|nMl蓯F |υ}%J&ڢ|݋f0F#`0FYw-rpw}܇xa_6]Os_8`c&ݯ{֯L#`0F`! 
#`G+?-7ёr`rgD) ?_IdsA$'bb ̍!jA0F#`0F#n/WOhC^ }wT٤#Oj{z =3Sk&I{n#`0oX߯#`N?Wn{g}AX.jtAyT&G/uZٔ~"ڕ"΍0F#`0F ݶ*l/e#eRzJs{o*|R䥗J~ȒZpm<꫘0/fPJ{O?7Ŝ#`0F`Y# #`D࿼e3xA3ڀHF,Rf!A:[n갑h6)G?;)_u |5F#`0F#^uy\ce6 Ec>\UVW,W^?h}V?8w= 񁢯F#`rG73Fӆ�L/-G*j 2d4<|@ngB!2HQ)#-[~8Sw>t/0F#`1\ߚ0F~ndK?~v,2Q3aE$>vy">2~VhS,gwCz'#`0F#`81L_Æ5kW}{G!+[ᕇM>H/ W8¦F/}eo{dF6}1F#`rF{3F3zU 8qvfs(ڬ .@{ 3M^24 Ė >r}u2F#`0F#pux#l?6KCNh/ܵ;"Z_{c,[" zV{k#`0F`" #`D}khǦr`rO0H PDB4D4W;>I Ѫ~s^VU=G#dG3GV0F#`0F^T 7?sP:U.w||aisφ;/'ɰpk;xR|1F#`9̿`ߞ0F֯/{_W&WOO>dB @dX/EPE `$OJͻT=;* ^#`0F#`!o\uֲt>K\ύN%7WgLgKwS)w3#`0F`9# #`Eʿ[ 6`zb!{@E.2L`D%FV 6:GA|#`0F#`N ɱ[Y~u2??:]R8eH*!+"pHҡO;"- S&}pJ50-ܗ#`0F`Y" kM#`B\Y='n1  t @!hfF" AF@b@5XO?\u#`0F#`0'[֖~WaC1Ç{ BeH/\7p/S@/l#ڡ/ =d9#`0F`" #`Bﺥ\z{C!4% \ xѧ yP [rzd0F#`0';()Gv~)\.Tmh~.03N|T-7/*,Ό0Fe7,ۯ7f0g 5c׾uebb\ fqѓ [TxaGȠL#$1e $jSs#`0F#`8y~חd{yOss{ d,Ve69ۅ-6>r`zM9#`0F`Y" kM#`F ֗ϫxA 쵒!‘ n`96;:ꑅ*H0S?r7w# :10F#`0F@&FGʯ255*!_|a0/WXaKIsir yx ;#`0F`Y" kM#`wtiy/Ww*PŚ}&~+ 6Sͩ!:}pl DsGg<[u_0F#`0F .0U>On+ņxpkZȇÇ k=UJ5]w*i0F#`U\f/#`X|[o*]ۋ'eB*60^A/q?eB!Kn9GB P&^-@yҏ9A0F#`0F"/^>k˞eeѲ|곲#p 69v/9`fn9#`0oX_o#` Vcx\'hi%N325HGI %}C|p琦F#`0F#`NʵWЫrC{ SF߸{._5]lLЀ}a6K4\4F#`Aw;1FsoY[>-{wT,2x \T&33eQvdP?:X}QNd0F#`0F"0M=-ce¾ֺ ^J.z/L`m|^_3S/~ #`Xx2R}KF#p!k/,~~,w >`:+kԈ2eBr.!}5+9OZT] 3#`0F#`8l]()Si‹. m TË }a0F#}n0FFz}tۦ23W R¾r.?ɡ|2[~TRi=n5~OèkF#`0F#xʁ}G i>UVg_/}t/:r-T_yPiⓏ=Sd'#`0F,3`}#`8wK/rXFR{S\ ىpGCK9'08K_? 
_0F#`0F)t6w.SCp>r '%6T_X2;|a'/v2F#`rC71Fs ׯ.|kʡGO' 0F#ࡀFH!FS##Q[9rhz-|p2F#`0F#pztY_K7!_XՉ^ fH 6y0F#ev0FG-Wo+{z@2@D < DE<\2ҋD@%)w=ظd0F#`0'ז# |T}C1vp#@ҤGM9Zʭ/\ʝ\ a0F#`:ԿA#`$?WkoF00E6r$)<ؿ3HG91<` #`0F#`8mWۮڮEa}Ë\O_X>0}6-a_X!ޯhs#`Xx2} F#48doy5S"hzB>R' dylQ0F#`0Ff\.ް&Smzo|auvG>nr=ha3|aj#|5F#`=bߠ0F|5 X '`F.S ybu@?땪5PV#`0F#`E`Xo{UY=1M_8]Y~aþ0v@ҩ(nЮ2[p#`0KoX_o0K}^zn @cD>O9 `0p E9TZhgC}j=j0F#`0F!p˿yӵj@l.Jf1_X $>ް!uǀ|FmPlcτF#`Xx2} F#ѷ\_.ۼSE~-CO%HZW5 0NOb0F#`0Ft!W\\dq}a:M2[+pQdm҇PB E#`0F,m`i0F,֭/?n'(1Q A 5@Uu H]u#`0F#`D_}wlz0|WĀ|aT_ґl/|:W6F#`pwF#`z_ Ђ~]W|EȆxbܨHdb0DYA?j0F#`0g\6NMO O/FN/v1_8gm ʲ_4NF#`Xx}F#x۵w\"ap>G\#B5FBBMI R=l;7F#`0F#phTw2:p/LU0rz_/zON]¬^}aTi.o\|5F#`<B߀0F,7tF)Єr$b4,C|,^PNo`nX>83F#`0F3W_|/^ }`z#z^z?^Q7wqh ’*·J 3#`0F`# K t0F`!0o*&"ȁ@E<π{Pd"SecX<1 1"rˇ}|n4#`0F#`Axi ïk'Зn Ǖw,*(e}/,t#F#`8`2xNgKm0F ƽmX^oxeN'*D +ܙ鹲x_N#`0F#`0]~W[yE{ ?(ډިKF#`XB%eF#pWqqe=n.>*I|FP()K-33?{VΌ0F#`0F^[3Y~[n*T;8Un2˫V?<[t_0F#HK0F>A~>\ $Q~"J3L# 1c 2}t-9T> SׇF#`0F#`N#]31^pV. }¬Q@[!6T=1H+jHCq "1>@ZG*s8*?z-x#`0F#`87\_.?o;߶>pGG|~C='2=7:o׶0F#pJS#`8t򃯿&f >Xh \Uk;;J ! &383Ό:##* *IH ttNyνIz%r=->眓/nBb[@XGHbm]UyC@@@`r#.H,l0baA^^D_]cN?H!d#_Q @Ӻ X8֤w.~ 븷qO}7K+ggR>θ$t6D6Wf>/7헙E28    @eaLT(zPQPcagYd)޵w/o;\P:Y"CIG,!yA^@@4  M~@\1Z~W[15X$o֌c^3GvSuĄc#[c^85zVMR&̪<    ,.c)Zb,lqu{KY"wdK,薐v{3Ĵ8 ba_w\~XN-z @H'H_gA@,ш7XŒu}F)I`dl\#FВmN.ꊻc5mw_m/ܷZV gl XnKYiilvǺ3.E%ynC;fGr 9pnvuKQ.wX8s{WNže[g# E~0Z @ ~6*%8\ӄ|2^F kзdG_w3iikY]ZD{RG+Wȭ_-i    #$_ WS3X{e1n 5`O;u3.X'|9l  0@)ɗ;oY 5a-;>;;?HF^Q֩ c ikA3je_kCmc|Qڵ,   #!VxKylOD,D]X s.AޝVUo/)%*# g`q G`AU|e^C:`$x)~U]y^eIAdD1n3 ~zJ>9qyR@@@jR7~TizJ,kÊmXZ/h3 o-~-+Rca;Gﯖ7 @Ӻո %ukЖA^z}LugkVk(nFgJHhAw/jcz]~a$rb`C@@@``yY2L~iƝvÍÑ~,'$OPO,Ǽ{}>y5wò`\:Jr5 @FK  nS%/}m!w :x}x5A/ =:1C 蚈^"vqK=vU;RQ+   >Pyjב-mo뒄`[؂"]@c[n.ӓ} ۹{ZLɥb|@@*@P( @ Xk䑍35TXcPZ1A"\7z}A#3XY?'@IDAT6z_N?ÊxOv#oH9tXV @@@ ,^.;6oڟV>uwyUCڸƅ%yKxSzm=(G 0hu9@8E tv  @F(ovjg5H}k kqAC?uѻ/>>;;n4w +or^S\s?Z'   ۖ-7pnN'] ]|/g`ct:^zUڃ(g! 
0@?(B@ Sl r]\,X2/w+Z1]:OyZP=k`fn^26y}Rei@@@8@T_?w^Xدد7*r@ @ |^<iּ- ^~|¡ig\G9-( 4A@gp뿼NylOL[8&n'seɔIA^@@@8bvPw<%-m:#mA,X 2k޿o,߃[vv [=/  6A@ cruûoH r%AmưAckb R$X}n^ڇC/.)y+:G?T~JihOU!   p6|\9PƲ. }p+K#SbޱtXXO,VW-wN@8 g $0$_z]!:ۭϻyݟdD[ZtMijw .֒ѧ={ ZOt >=]&rdC@@@l_y%Ƣ^ges^'͍mg-X8u<6.># -wzs@Sݩ5 @ \2Rz׵we{݋k[cIyaw6 K[z+<(%`6'Y/rE@@@8@f/<].U@cƹptG{tuj0baۓYbA¼{l/;@@SŸA@LX6\NJ=~Ǻpm!^}g;_átt7y h4_A/O7m5v 3. -{Zie$S2   0L\[Q,|`бţtvtIJ^PXjspnV>zniҤ6ΘX2@@_S@@ W-w#6j?hw@+y;YG`sR' ),;cyW."3AF\{oy<,ͩR   @UŢBݱA˶6{Ky@%J&͟.V,멱U ؖr2e@@` ;@@ "!mIyaבnC-51qo_~~ĝ*%3wv:z)ܭ'hy{' @@@^{"b~MXv p.W,HHqwi2{~ܵVzEʽ@ +Ȟߛ" Ky<#qm$:F Km薸ߧ_!_#RF" |r _yV׶Dʟg*]U f?y l   4T>2XbѣM.齿X6IIV7sKSjG@6fȶ_" "pJv m:߭we/qoN X9)1] t彵;ACPX_lJ>b@@@H XoDr [,3.Mh|Cez xUb']?%&qΆ )nYuj &p2ojP4Fp}[- :b痲^CGs-p^9]ػkUϽ@7w/7]rɬJ( 6@@@L WN.l9( Ś~{X㴱l<-E%8q]ٽG婝GY  ]$dMm@Wr]Gdw' 6:<D>w5A+.E3S>O-@fAY)edZ ,P q r. :s3{~~T{)zZOkI"-Mԇv>%{-YGKOlN] @@@uMOgC;j,_#lwN~k0f hƅDݚARiʟC)oVkg @@@+dqͤi@|XK:bIJ.9Z^3ܿXy]xR~=$ ._G@ E8_Tou DkP͍PTnb NF&E\CEgP7uȃ: bBo ]Uȯ o   &ёo:F|@ZuԼmmksicaemV⼴ty'WfWJ.S ) R+@,pݹS#oX5ThÅwr `c:F'47K>S>9*8c|sM5h7/   +PS/ԥrŢܰ qc.D4-^xǦ d3dL@:`۱fyYltv6=n c>: X.Nt$&` !}[%#QHK<DC!YOW7@@@lI3 [!Zz7\v۬xnƲV[ X{y~G<.:>)S%QMz`C@  c~J* \>njso~U#%xם Ez>ǺRT);pOG|;!O>*vfVJYA>   @6 ,..MfXŢXbXg\ s%3 #utһȃdVy̯,Ɵ:# $dJ@@DG_F~4wtyvYwr܈v;uջ/ҝ薈.ܖR5VNӗՠa:_,Z=+   Y'`whv[2bVRbQ@ZOvX {Xbل6 @ k2ï7헺6lvԼ" Q|_a&  06%:i$kꠍr./u[3Qz #ԂZk˿nd ejihr}@@@4zǝ~t9nu͍s#'O+qKE,{Kהh,Lޠ11 L\(ĭO +`~b3$4qCrud\p\C vZtuƤQGG "~V, cT]G_l<"   Y$ɷoXfZH^ wsZO)0[,|d|g m LL&S# 0WίO]6DsCگ_?5bXsVI:m11. ZYƨ|.mG_?י@@@K0W}%R^wJ,[P,%m-SQ,2x[߇Kr}}6 L4uh" ,R& \kuK<}nzp:j_;쭍ýXB $+gg)..m7hyYge2Ğ @@@ K r™K.1]f@,’<ɱx~юeX| M\xHgų.U)l  0Hψ @\:J5˦FaU3Hs󤵹%>7立ְa[,Hf $7dﺝ|aŵdSݹneʷsF{6e'Kց @@@ ;ɥ-?݋[ÑL. -ҭ ﶝ.5-E*,V/+WdП9D@`" 0=@qx95Uirw/̏sptʦ AL6|hǼ73<n\kD?[ʻv1,_{Tfl   !0XfU[ySk,z{/.Y mMgcq i0u7t~|n;qu.N_9BBφ i+@@4< )`qNGdfiis-*/t4Mr3[?@l?*SF{݁~˻;?ho-1yX8)Ϊh`̆   t):is4&[ϭx<1wz4q^⺞M bae-a>(*a#8n,mB^=-'֒ u*`R  jn^'_Pթ! 
K GGmyZ@H ǷMw$.G6ȯ"]:#@`O !tRM֑o+f#rz0~ gy{ |aA]xG@@ǚ%M5_* GOz1oJ,+5Ś`Č-Ftd d$7w@S dv_'ݮ#c^ǽݚ:*ѣЛ:0gDOc] (Y~NyM,U&   :uBqbyҙg  cͭ@v GcX/Ϭ-4@(i%"\J~w2b"- H kJ3^y-7#p   ͺ4'~Q~Fiӥ,bB).gX6o8ȿl~??;  gϝ@Z5?%ye>N$$tF6@$wM%nDo d2ux_>Bq./ߘ#  8֦=A^]+흺@HʫP;3!./̓ϼv  c@@ UwOJ4/,U5%Dw[~JvAWrccP@+;   R!{UuפJ  {3So@Jty6@{ޜ;" @m<(yhiba]K $GHz;9ʿ~dNXS    &P{UNg[\3%)-pTc#  0@N/w$']}:A羕~ۂQAǿ?r4*_#9k/ϟ=;   /hO=l? a] SbPʹW'  cM@@`q:S[PS7йoz"RF[rMo[(Oo=w|溥R l    _}bPw,bṕrM+dԲ@FIQ  0  iS   p&crڭ{$֑o?›_vwy]/];FkE|[KHyY] - @@@3 //|Dá3Uc  "@@ @@ 3:bgv:));/exe ԵK(7?B@@1Mke 'ȗnZ!KĞ ! $dK@@і. ` ׇfp33INoWϳV343 !  E 薟klqeztn8G>zŹKx,U0 Y-@@VT@wUVٸ5|xS)O)  S>ʑ\2_ A yG@@8@{,-v&Z,l=ͨ;t6KgC@H߅  [4][mPI6xRG{ ɖ K7 @@@XlpvUMr)   LVo''L, ȇ_@ $/ї 5GV  0(Θ|r󻤥KSO:ݙ5@,0{ǫΗHhPd@@@L7uv2y:]:L+/ߴ\f#" u$dON@@`8 m'/ή7 k⧎ם,`ܹ\~^U|EA/hgv^'rF+^'N-yϋFx<Ύ í4@C[!  8&|j @ nσ  @+Gv9J淯ڳw}n :<+M\h8@%0   "v7# Ée-]s7,IQ/E@`$ V@@4h?SNwNƋ =\& T74RQ@@@2Mw[4L0cᖟR3㭔gVd3A$LGF@ 4j<]~ni늹îW#HhnoYPWj߶\.W\w@@@F\_O~Vetj, |HgG޸P @ `/  0 G[:䞵]& c?!k*Fۻ4$ %1`0Co:O!>̗ @@@`tbn߳njez7˧Wt6 F\@p # Q`c|KG@r~%.B))zНs ecnEDd@@@&@G,3=Cu 9 g@?wQ@2EL%  Џ-U[ڃ?r,njBa# w=A)K4Ѱ[; م   t|n4w^`bY7K=3{ </%r l g  8B@2E`ӡFZYY-)A6\$ MKfȧߺTryG@@@`Nuwu61M<Ʒ;Xx~Uy녲prIpa@UF# %Ҿsya1mF@xi#G?ڰ=kwowʐ)/޲ROdcC@@@`4wȷm^+qю#˻r@H@@ >"w=E6ԟ;}mi+IOpH>vz@@@1&w_nr]bcYxÈuM~EƠ@ [H_z# *5[df7ߦ=-XMtC Qy0ZP#aDݵxA@@@`,vkXV~W[/DbP=pbeWiecQU $dN@@TrS[ef7ߛ {+jntn{7%OT(_y\8».   CrZYИ‘pXCr@l  ~m A Z'D6Lu}w̛ :~wrW,?3<ͫ2(WsE/φ   io;uz Ps8j+,dfƻ:y|RSr@ Hߗ! ''ȿ&[;0?^ş[6@k€8WS#_і3_mҲyo^z   +nQkpĘSuf;oY)˧LݹjOk)2D~+$/hqC uˠP@@a uiޕs0/"5I{Wܻug{w_~=:jv3*l=&q+*ɓW6e #mI@@@ f;ϒk&Ɏcr#8C,ӰwpKGLzmG#bI w?!'tYCC9U U$dMe@@ D!pF{l͇+Tvok6}\ȩ囵㗯wVxϓrXuqi{vyɌ^@@@T`neU#M5pBi}`ӡFy&Gƒ6nX[ s%k?x :k Ɯ $C! #S^:J޵|Xfm'}w#,`mĿ7b>b?㉄<7|\1fSޱfr*u\|h^l %@@v@@xe#ZM` o/ߎ{Do9o(Z1]v]:oKfUJUљ;vy\:;tT+/RPw[\.̆   ,l=Oxs\b[:uIJÍڻ䡍0OOtuRX2FCWuy%$d`C槦  Fr3#&[{S{֧ ~"7_oxHkxc uRSR .yqD$7W=qv7ʟ\4Wl   k,{27#lDXXvN'Olr.  ߁WȽZ!{l<-5ya.i =$doMM@@()mj-.'tv;hwkHNtg;BzSVփrC.}>|ψOЫ|4KH]qQF!  duƯ^. 
rò͒*;uo>(O8,o3Y&٨C{^})*%Dן7-$@2]L  0ersɱNy}p#eDw̝o\mз^7ן5"vmj9}q ^PXL,vto    ]fhf{7ۜuwi|XvNyxc[`VyQͲ]XB|,m:#@@@IhX^ݾl4KtbaMgvǚeծyNb_\t>7sW@LёXAjו!  u뫷zZÄ7P7uߗn0zNXJƆ6i>v'I4W ߲d|vQ6@@@FM`wm:iJzJ,7M~@,4xBjOvhM/L¥QYk%ll /@@T@Hu7 ~ehミ`7AqqvLΑc'C?|yuF (UE߼.ņ   יv<>I~ ߨ3 |U9?7 ]N @ B_Ej  S%?Ye9)Ķ]~Ǻb?{f>65Dp$  \1Z~?/o~[=ɍ~F 5l 6m.!@뎞F ltYê}cb=   X *//ݸB'c[Xv ͗UIaq)# .@<  W/>x|ɂmF { 5vx#g~ srj{O-PRQF@@A`v햕]).*9>XؒtV|].5ç5HL `C|27  @Z U<+[/5ژ1`yւh$ D#a$pXOX/Ok@@@ ,oPr͢)CX s1]BK Od>,5Df@@gҿܩȟI#!UzsO>G}A"$Ju9ۑQ %tȮ\cn   ȢRm˃R:KXY&B+(s1Ox}˳ !!@@v@hq]"o^)+~wt J-he%l3L)$lzC?K:!   "p~Ms+ϩt,l(\$e5ICj,~?p< 09?l  j>g6//Qu^놿Oz'ۥ,ߝk6 iW_@@@xܽfq(Z\{X؎vx!F'O.ɗ5@2\   Z6Cשo-.1s+G;MN-72    = [~Tk< @ ?-C@2O6yu'X*0 T yw'D@@@2Mൃ'[k 9 w]*ϫ4 "@@ @@& u[eǑ&o){tYt& ! _#3 'FyJ@@@T`ӡP+oD,>L,OW/\2J!d@(CE@@2XiȟIzE5xhNmѩefm1A @@@ $p~M|붋^!o=wNA;'Яͣ"C @@m}\Ag6pgFksś@dcW'@@@@`K[/tvyT˳e\b-CE@&3L_gE@8 jg3+ulno9p   A`~e|_\-Z>Krá3'3"Qú @@H/W{.ZЇt۵a_[^    0 #-r;G;ܼ^,\V+Oa܁ .@@B<  vo?C~qwŬ͉xV F@@@ ;bӗ_-m wy];:^>Dݚ_ߝeut[w4ӟn|jv1uh|`mvu;12_ΟggW8F a˂}/c`9c*=V 3OKKg @{[{Y˩!ZN? }ic9XcK>SJ9)Ns1s5K(K)ƢTs-֘XuC=K gGyQGn L32l.\iWZyUW;λ۫nV}ojJ78!ITa1,jکL!8TŃ[H8ӪŰ`Xm_mfvSG5oXNtwlV?(D 'MktyV~gJZJcz%MWG7q*=J2)EVoݭ%lz6фt1J@"#:f k2?̵^ܞ&5a2|Űvj(.ƶZ71 ۖO3Mu;ȊkZ;.W56hoKg޵ۢv юfw,3oHq/=>3J [.`Xn; ]ek@F.H!&yƣQ 44%έ_W5ػ cLzE`p;6T!aȁC+T!Vlmԅ7pXWaIS `k c jVsj'tx9]U쑨QY%|PͥUj3l˖}Ĥl_3 dW™F׀ne:YBDױbp*gN_r/%bbFsj8w@*n:tL1:Hݮ  WfnI;ŸuڎG,uh@Ѷ:{T{IdV 'Xĭ>kS|""ڿkH=O_5HpC:䣐|K>vӯ9j|&p\X[yOySrʔ7m+ϻ?{X78gCMY aK}$D@o:+~2Bqf,=ޕ@M@+7T_Y\}-Ad˜Uɋ6[*<{7=W_1qli>,Lh)8@Y wych&;;ld!j.#!3y"j`!δCCk͚2I.fu&XJb*te$@v7 6n3hDT-'o8G!l(z$#Ld,FՓ 26#Cc--ڑap0[qkw5Y o+ʈM1>O3ģ7NۭotH|P[>N#T6p>|5)GG0-,&զY0L9IXن{Neעإ2S!21DSHt3lʐ3r E; tm@. ,'.0xא̚{g*n*'&1. AJ)OϨKjdBR/1(vfX [B?:qѲ o09}?N@Вb4r?L>^ 2 J<&1NZjQ| *\Cp[,ڧ*?(U ,x#eB6M(9cYD ¨q4,սݹ4yDK-Bfȓ}>5OPaDa~^. "U*@8q?ԡm0A!=MS pGGAEj73@?hCUu<ɧNUa. 
H~ XM^Į'CEA+<9Ǔ]2PGD2<[^״,)2dԣGŚ{y|v:x4\NUĨz#@vT|ideqxQ|]KI$GpX&S/r>شm~Ê ~.50+\GM_ɣ8 tDŒorE9GL12~G-֩Y*>Dʇ Y˚3XSz*5XzKДS--GJ0W:4rNip)>v;gLH[) R@AK#lG.b$ ŞbpM(Cܸ\WЇ[3@&[Q؆jspae L a*5g_=HD},5,5-%m* ,pC+9Pihkv:geu*ȴZM6 ;}i>2N<HŮNvX<̀F+l_v'!HA(sA&|~᧪IN9eth.ݸ1EթC lO7Nz '9΋R9Y'RyLrN[guZ KQGbN4t~C7tB$n@%`['F/p NF`0^f  KN)1:]Tㄲn:ȒVOLQ߹EnD?+sDJmK"G\5k<ۇ!cgzO' \Aw `(Egu஠>n eQ RUAJW[p CUW' v܅ݝniiȲVН&-ʘjQ*Uٯ@bJa5\uZ7L;텪TYPOU/p `r'0Lo5h gp>b33!`|=j櫸vX0x=˪&,(WT$p@WRQH؆t?Woo% coJ7WS[O: R@=p}mQ7eمOHWJZks{`jo)΢V4N5jJF _ݲ7u׷ S;:dg>OviC>'wT6U^SqEwJ/_JYz{SyO={"_{D9|!sC |Ako@*!~BT#r$ Qmo@!! _Չ@M] Z*VST\שׂ\!vs;*++1dZzS^:&#zpvJ~)76s?Z~>s?Z~>s?Z5ǩoqjy2ww6`G<1%) gi1 WK:?sipRׂ rJюd&H5u}=4>ɮNh.&64~Vf ޻ hPoόz=ނaC:i lRtˮ`S/][&`5flݵ#3^gQk݄v'okjE ˺{4ԏ'~:pʳ5t㩟+k*ْ%eų&% cjgO7^O*g>)ƐjG9'Ҫ޻bz D-kUThNwq+`D\na:Qzk%;nP`{B\Ti16J#Ah၂u"[&ݑZEgg~'4;U!~]}^z+QEG]yڝZm_VQޕ ɗ Nce<хXOt!ON>MOt!'x u<Ԇ:jC?q 2Bzd9l(qXC!Z:VcRo|ҋM}٬ga p dV?l#yvFΡȴ^OՅ؀px&s; %7)ڵQ 8=l$I;PXدÓ kqq2Z).sw]cӗz)`u=,hfz91A>D{ȼ^,Hz'cS:_Zۙ*Kku$Ч]^-z~Zl.asQ*MKbw70v)Ƽ.f.ѧ~"-`E]s窅5ydwr/fL):(x.f!]ުYn^pWyHKDi뮟΄V p^7# q=U $c!^Dx{Z= |.\/Bxc¶:}j~8sSך|ktKxnR:>E}n/aԲj}ecwz.Br͆W}w"8+q^Ǡx6N*oޏy?۟G{E:yK'ϔSk}O:9.T݀f%P@[ԝ`һŪ7׼v=Ctyri6E"8*9M=c%))}9MLHs׮%\ÛQB_ o>M%?| B~cm!1D[ԏ(Fj1xW{4nΎ*9}Ի>*N[l{eHpfڠJC*ke=[ZE!YzH4i0¼ [_ʶsN7VfEu_ Y c 42mjܥo /wt #OGy@7~Eוʇ>͋(hb>TNy:=~m[q|ϸ_VgܻjPsqzhԉI35OکY8KB&HRV ykg$Jճz*LRa]f{pn4GI@H<N)k\yVz=۵ ]ۈ|j~w1z\=ӓ[RyjuNhWz~݄SA/XORjg~.|/EO2'|(_D4ےCNBG#Aق[ IoP?@Đ(k,aBb+̊8gҁ/:R_y-mDY?1M9HdR!{~W}OlsRKP4CEG*Xh`eo6j >@}WzSE_Wr/0bB5ź>"oNUD7-8}_.2!3DOaiu'"^_Cmz6BdyjHTg]Ιh355049[}Q%eNZ 6z;r{w>~DP} ݮ+Z!1kc{iTfnOiY7Op/}[Eݯv7٢^-o7U o\}NvXzaKuS Qe\Pz}8&~kȁgAMA aiCCPICC profile(}=H@_S"-v(:Yq*BZu0 4$-.kŪ "%/)=B4kJLvU  `Xf1'IIx{zY!5g1'2ìoOoV aVUs1.Hu76өy0X`YԈSqYY+XuCH`K BA %QEV )ڏ{D.\%0r, Z 7)_lcͺmv?Wz_i3Z.ۚ\'C6eGy) }kno}>i*y^xwogoi/r 2abKGD pHYs.#.#x?vtIME 0 IDATxwuSO=MR H轪tcEWԵkƢߺ"*FZzgfsϹ)<|O3{fN\UUJ%AAayi*9T,?8rAAaSv44MkҩYʊ   DACC0]׵I   ^K\bmo]S0c1R8i<]Ӻy  6xXEEK$Hog 
8q~o.lbӤ^9r!v{PU33fh$\m۶˯9ZZ4uR5Ο1G0"3WdnY^rW!P.<,OUUA.:uEkDŽ_Yf+,Թ$ׯ`GF58VD^Q-4g7ka9&#i^ĥSi6 ~^*%^26L5. ,>f͚هy.ㄙ;UUE4TUVrJZ1 cr١q<ϱ!(NQոynY֦s]<o;%zd 6bb44H cH0>"=ݓ>ɉ],R?]mgsΣ>틿gWSbo>K{>VעsgNJqL9~ `lBn 1Zݕl9s%JfIL}M}}+\6͎b3&Y6ۺ@7Z8W#ᙟ=e3AA8q]>>hƎC.ks+CfNy'ܗ؏ߣK;8w˟L~d )sGgDnӳs].nX'2 uI» t3s G1"UN/6njb]AtNh3p2R*pR[SFpƞA8 o]Og\)qNQ8b{,Ϡn z1cǎ,x#bn]{Zi6= }tglEE7c46HiJг}4cF&}Kғ)R]P4bjiC߮N2N"%3`fPud:ESj5v!Kwo)*eDD d'Anw-فL/gAŠc4?/7MܿAs'p‘hdx减eP1s`ӻ^Kew*M}_Ǜ u)FLJg1oWAJfT{1 l:cT gKzJ^le=|GS*/i7K.*A@Hqґ,8[ӷW/d 93Mm|p\gnۄt2_&<_O7|]Wqfo _+J$~fMDLPӘ.cL1ޕV_bHYoW9Pԛm/D R.C1Fbϸ+G_GM?ܸY%Y(2/sǡvuh;,M?:Bԅ| пc J F3.JkTR1sX>LM.0cqՏ[*D&pTb p@kTY˿Ϗ-䔓sC\>~Ա?]gyz˅L?&LrX:s$x!՘vg22 ԝqS|4-):x,L)x+F6W<<צ'bxJ"+kvKCSű&Ne(xD>16۞g2nl>8?.~^|hkШ$4j%W!KOC;?<>{h/QӠҠC swt|)(Y@$f>#Xxkj(V/۶n_h9ݡ~˥k\Qfj䎇  4õJJ3ULXhF +bCyyӛX<}ƨq*^V`e9:EJ dB.'NVȄfe@UUrIǜQ2 6, W jjl|ivw6_ lwO<®)o#( O-cyQ0lGT^(1z~{ ^)O Ƅ&'{ Z֍lraF\N]t7?NzH4%h(u#c7`v>KPCoIÉhI7?M`ἑ>0LASٵۅFl)R{쳽q@_Mq׾p;7:_5~YAIzxeYL{ǹ[^[} zYѨ>.ܲVI1ͤYcĐǓ~!Myl}e[4OXN=xX^u#ubg YC"W8K 4 vwuzh3k ?vґ 84b֦ա#Ppn0S8 xx'Hl;Y%TD~+^~.GOPxTX3ts:mltկ'dܶlOA3t`M>_XYQP1'F>\ 4)}@W_ d<+OWF9ER.?\s_|ˏyI۫ G&e츱477UBUU{zٶu+T UU/-yNH+Rޡ?I_y.駞 ΀ 0YSur#x|P^u} ?\cmwwNH 8ZZZ153vgnzHhulsq sqC8C+qk&_2!.i/΀ 00HKy 'ƨxGBS õ86Iz& y %\ϣd"f&ӟɠ( :{_-q|<@Ntl19\E9A&]pt}b6ㆅUAT ]G7|J pB3 4q@QňԨA i`y~_bRtLAAR   ΀AAqAAA8q7AA CeAAA8   ΀       ΀   `ג%ӦPRFnfs1{Ϋ?6g5|<9|=b^~FZF˱WpG#zڧ⥅Uw2/)  o>lG}N\׭QFqŤR7uPO--|'ŝ/K yooeoGg si~-4g-w]#6s-'zg÷#>ą({=ڼ ZnjaLkҟawXsW+r_&(cI8k'モ<:w1kR MU9չU?%KdˊL6ߖ,!\h=e%>I oh?Wi/f8ܙ8Hٟ3[}W_s15z}|<w>+LZcϱ+c5eI_+Oc{~o,gCCu g].V6DO>wnvt'9gNK iw포dSK)In?>E,|UFˎOy>E uUw׽s WTrq7x=S/o=D>{7<x~su<} \0y`jŧ]{GYlՏ7sΕW8 6lMj@Oؾ ֟;ٴq'ʄ|GtAZ3i2 U9R_zWȥ^u%>{Kcm ҥfXt)v^?tQ2ZU1ƀksg'MO0iLMUmɲhgssF=L5ᵬxLlRp敌M+(B uM yϡ%ѽnnO矎 wt&i>vB 6({#\:9OjԀ{9e7ﺙ_7G/-Qɭ IDAT#8}惭Y0⺏0+h ۂۻ\[K"VoFto󱙵^2+@i$*ue^GvѸ39>2h>grP٧Zi”'n9a:-b~'_ԩXe Uu4og͚imH, [PJ-ZĒ-!%KH,ZhLFLAs٠“{\}<\ݩ8v,$c4>-S988=$9Urd_bW`s_q59c#vRpSЀ;xkb܎es_7s. 
Uty'|vr}{)2ib=v˥qF+^6of|ONk+[s*S'bᵃk0Ϟ2p,ξ>`ɺrg1cOrşp*n{l-Lֹ}INj+#[]qiCAAx+fCc Jh1#Pv!AITN[=Ϻ'c9G|Vιxōd~z PFa9`ߎi\ɧ1TO??k^|a39=6i*Vv3o^;*6;P2Rym5qe}#m+P%CݾiA͘->a ?x],8P65E3ef3?^7n?o\\g^igeGq3ljkq/MIӷ}2UCuEif?|wL*.{g못%_o\9&}ӱc{hsyt=\H>i Fb'b./o؄,b1h(:16GӓOPw9s yGs/_IgrHCdó(Z`HAAxKa&sᐙ3Cbҥd_48f`o96F ǜEam氣ZMD 5c{mb.PsB:v?#y3:ذA{r-mG.dƵlfpڅsؔtLtgsgbDw m#$  o:Q_ŌoEiVѢE{+.8Ԥ΄IN>SoCW,2}$F52[YfN̉op1nֻsW) wr7{RYv3޽w#3F\0i_{J71ӏ䜓ȵNf= 8??hm.6gFqqӉ\WytLh%fwrKR MEǙ2uk,bw}ڜyey$_^O~c'3R+q7 /sټkq}Ow,iR+?z/ S9b^䪙fاs_Hww$gBZL+LnAAtF+?Bi.˟~Q#GGWҩTEܿ`$('"N ;~jϱ{Wڠ(7{{~uJ&FOY{? ҾC^:zB٠'u?liw˻n{qMck4G_^[Gp̻x*GOǡM_~p[ a,Gg6GSW$/oC G_Qǁ޻'~KK1ra-~,[NfL7ktPh?kFQdhF 3?p3KFS[oe]p6O~~s}t߀ `0c|{E 0g1#g> &U|)65=%tXA7c/Goh.~xKظ#r:3/NiF&1/cwkcNb2_ɯ}}ߥ?w?~T5|7;ƋLHAaPz{j=u,xPT_ݱ5iJyd-\u2foVB+ 6:'a槤AAWڇFn$ڦ0?GwÑxpPfsf8  LAAA5"AAqAAAg@AAau&   $L6 yƌ/="  Ðx"I2 /<J-H$AAae6)AAA8Hg@AAAAAAAAAAbǎ   SO??z.%  >eqwfڽ~8  psNnxIx}"AAl6ˏ1|QF{o3   0TO}J;wu]qAA`aԨQ\y'?׋3   ap饗p{$XAA 8kee@AA{+ϮxJzQAAs/7gO= ((Z%0IdbL&I67cR)TdKKqb4F< x9m  xn{c؅~B` r9J,nJ,LfQr9v >o}nX ?u~R6;#ohh^ohLhnL&1SmR$ZZNFa&zp,2AAz؀1i;eXV0.(SfF{6K'lٲ͆zl]ޗmWl{3p:<<6V *^Q&f"Qn&Z,F3$;cK M󝋊cBAAoUoۡ]mFz`]dpJ%R +/oeqQǑ8QZd[S4%4oQ_-_ˍD"6=XR)b4fbtOD/'tⳗ=9TjîmWA.e)%C)4s9bX͆vXqFO]zKob ]~EU^eW5rU8Yhnc,ODSz,VvJqT oAEu ܽ :UwXɄ], hy/P3H{(6ګ W]/5-c&e,l P"UrrqEU UE_W:U3WwTw<)/WYvT^0p$;Z v+]:P_gʣlvklF^մ1hkI"p BxXˆ0i4]x5 o $-e2X"y/xP4U>bW'w7jRI wru;bD54f*UN~UoмmEU3{a zkZDK銁*׶CG-cb@e-b-*gZ8CbevRՈjYF,V?"rJhREgkBg |0^ȣZ^#Ҏj?{Mu oѾ'TطU=^|F!@ވa$gDkZr֍ȬKK"u-Y6J,cP"3A}K 5Rm20HYA&2mT=qtorrWs5&{- |Bo;F],̸Ӫh7F+zj_|74#)%wa6Mtb 8p@lA2_’lm/Z ZRGqE1)S~EiKxPZJrhMĸ9\η€:ǂfys.X1߫1o&b 8 F4" R/^c~ V-Xkֶ{z?KD53DadMEcƃ@9"RRoi2TԶ ؎] ZᶕW|fv>2^*[ 7kjߡ"%΢EjFX} VtӬԼuPAA$  c?^o|'kQ߶ и(sGk]D sJc#>SS'bku,^чĦq)Q Y{bVgEef2߫TTM"  Vp*մ kظE }wP.i[ZiTuuR.GlT5ub8Z g}Wdo&ģG9M*ĈVpM&+gyxAAw.M'}DWPݻٲlWݻuuѳeˀPc";[&2^=4nVmmZ[+ڪ3mI;+8 oca+6Pd*rG30{{˩"m|G?_ 7S*/wweDap5 'XM ~-mW + 3 p*Dҧy}]IZ3ed.7P)B2R0 j3X_AA1=f`qFo.Vah]XUp^Fߊ\ ΀ њawfh1 |$(.z1}h=L *yރf*+ ERLmd RFu-݊d S!~.֢NYDzmN'jYDW-Y ӬַdA@:Fny4Ҕ3|#/Ul5k 
x[D༉"|@p~ Xv>O1pkQ!_ Ơb]~[30 0"Qp 36'VU"H:Rc@V+AAj5f))w9ޫp;=@l3LBhY*#f*McAi||]f 3A`#A*@T]+WؾU\z7ơH+OeUէ\T8}}إR:GNTpiyxS||o/}۷>~o#u,s3XQ3V%W0rvibDXW檉r 8Q+cxpAR)soCɯx.b؅@~_y]ǩ]K$4ޫE\Z3͊Qvh{6=iVl523Zۡ L߸ ~F S\ueN4(^O@*\>r=pGۮ5~(7?r?=(w4L&9q5_Ӛ5Lq ΀0\@WBu#hu݈!j8{FA] J "ZfEQZوl3t"HKZqxX*U6>t=4Du< %GwwfȒbb&ƻX"LP]ˢjϊ8V.GBjq:)^ou'cO1@HUbQor ؏ʖIDAT ,9 3  |~@SlZL< X 4V.>ہ>AU|]Ѿ}6#x 4ź 8jߣzd.EǴXR `΅/ 0 `8:N8.EY~;k\X\EEw.֌nR(QU(6c[mٝD" -dr ƢF\Oa ?_Ag@xG⚙_hus0{`כqPW ] E;:RCΜՓ(H5M2au z3hT# %+ cЊ]p8~EKz̅X '2q`GQb.W!JY|WnK^$UWw8:"UyCU7TZ`qAhtzud#TcZdlQQ^uUxAg 5+c )Z׶A L _y}7*=5oXk|͠* Ybz^s(cCkj&s z/k1ہ:XOԐ]r-?(<Kžf*0brĪkYDڱh F$сeZqk!΀1rTFMH!Yf˙k|MյKՀ쇊~% _10Ѭ~Df2HARM+ r,s*b-*d'W6ֿ2hEs"qAhښX*')c*P*b="ʿԺG1=,^Eip0ࣟ/5+G#cwcb>nuZb?~Jj0AQ2R'Hz+ lOT5&'‚b()U(ocĔ_bA,YЎ761hSa9 & j=Xg? sUJؖUY:.F3Hރe0vQ!: M( ?{ 7ShDssG;2_6A5Jb {wKM8+M)_O͆{N$}mvzԷQۢ}_QQE6kL==2 M y3 >/§jZՒ~`8UATk#whMX:ϻs^am9U푋45. 4 afg*Z\PF"1S#! kgZۮWeW֪8u'Xh8A==be`xĉp0pEʭW4o/maXw˙f*V#bZtHd8yhQ\<j/J|5fkwwwP(kރо^z5 }AKpjWD_潹y@Nj{4 -c- osQ#BcPvI$±mHy SQX>*+vW& a XV@otf)m#}3(.*TtBTQ E W/TgUâ+50^ʼn#À N!ahÝh[rZDTt%2[.Oj  {yZaAS=8 V%r9JLYU5Z kF؅Bxa`)"ܯ>\]|ĬSp$ dKkt5L^gY*;j*z=9k;/A"j{Ў76#SU/qރeHiv5b]AMLxcE )v4"&6?UUH(*۾4u`ٽqjQfUߎkb-%f>θ topp:/TXkC{3V 4hFAd@$"vҚ Z }}k8mZºD5("o^;A_+{X),/^$gj4PUT"   '9sRHAARAAqAk,uGa lQ.f /f%li Z6C(sk|>מ 1@ b 1@ b 1@ b 1@ b 1@ MhN.;W7/1@ b 1@ b 1@ b@ b 1@ b* @ǖw뛗HJ  1@ b 1@ b 1@ b 1@ b 1@ Mhօc$%@ b 1@ b 1@ b 1b 1@ b J4nlY @Rb 1@ b 1@ b 1@ b 1@ b 1@ bhDwmV7/1@ b 1@ b 1@ b@ b 1@ b* 2b 1@ b 1@ b 1 1@ b 1`x%\\zU @Rb 1@ b 1@ b 1@ b 1@ b 1@ bhDLNHJ  1@ b 1@ b 1@ b 1@ b 1@ Mp(/ 1@ b 1@ b 1@ b1@ b 1@ bWʅc+e 1@ b 1@ b 1@ b1@ b 1@ b@S%/n]f 1@ b 1@ b 1@  1@ b 1@ +nɅgW$%@ b 1@ b 1@ b 1b 1@ b 1 DSo,T7/1@ b 1@ b 1@ b@ b 1@ b0_g.=XN^ )1b 1@ b 1@ b 1 1@ b 1`x%oq!zu2I 1@ b 1@ b 1@  1@ b 1@ +xtB-\foB L^ -_d󳿮7uPSIENDB`hypothesis-hypothesis-python-4.36.2/build.sh000077500000000000000000000024351354103617500211700ustar00rootroot00000000000000#!/usr/bin/env bash # This script is here to bootstrap the Hypothesis build process 
into a working # version of Python, then hand over to the actual Hypothesis build runner (which # is written in Python instead of bash). set -o xtrace set -o errexit set -o nounset ROOT="$(git -C "$(dirname "$0")" rev-parse --show-toplevel)" export HYPOTHESIS_ROOT="$ROOT" SCRIPTS="$ROOT/tooling/scripts" # shellcheck source=tooling/scripts/common.sh source "$SCRIPTS/common.sh" if [ -n "${PIPELINE_WORKSPACE-}" ] ; then # We're on Azure Pipelines and already set up a suitable Python PYTHON=$(command -v python) else # Otherwise, we install it from scratch "$SCRIPTS/ensure-python.sh" 3.6.8 PYTHON=$(pythonloc 3.6.8)/bin/python fi TOOL_REQUIREMENTS="$ROOT/requirements/tools.txt" TOOL_HASH=$("$PYTHON" "$SCRIPTS/tool-hash.py" < "$TOOL_REQUIREMENTS") TOOL_VIRTUALENV="$VIRTUALENVS/build-$TOOL_HASH" TOOL_PYTHON="$TOOL_VIRTUALENV/bin/python" export PYTHONPATH="$ROOT/tooling/src" if ! "$TOOL_PYTHON" -m hypothesistooling check-installed ; then rm -rf "$TOOL_VIRTUALENV" "$PYTHON" -m pip install --upgrade virtualenv "$PYTHON" -m virtualenv "$TOOL_VIRTUALENV" "$TOOL_PYTHON" -m pip install --no-warn-script-location -r requirements/tools.txt fi "$TOOL_PYTHON" -m hypothesistooling "$@" hypothesis-hypothesis-python-4.36.2/conjecture-rust/000077500000000000000000000000001354103617500226625ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/conjecture-rust/CHANGELOG.md000066400000000000000000000023021354103617500244700ustar00rootroot00000000000000# Conjecture for Rust 0.4.0 (2018-10-24) This release extends Conjecture for Rust with support for saving examples it discovers on disk in an example database, in line with Hypothesis for Python's existing functionality for this. # Conjecture for Rust 0.3.0 (2018-07-16) This release adds support for annotating interesting examples to indicate that they are logically distinct. When multiple distinct reasons for being interesting are found, Conjecture will attempt to shrink all of them. 
# Conjecture for Rust 0.2.1 (2018-06-25) This release fixes an occasional assertion failure that could occur when shrinking a failing test. # Conjecture for Rust 0.2.0 (2018-06-25) This release brings over all of the core code and API that was previously in hypothesis-ruby. # Conjecture for Rust 0.1.1 (2018-06-23) This is an essentially no-op release that just updates the package homepage and puts this package under the Hypothesis continuous release system. # Conjecture for Rust 0.1.0 (2018-06-19) This is an initial empty package release of Conjecture for Rust, solely to start fleshing out the release system and package dependency architecture between this and Hypothesis for Ruby. It literally does nothing. hypothesis-hypothesis-python-4.36.2/conjecture-rust/Cargo.toml000066400000000000000000000007141354103617500246140ustar00rootroot00000000000000[package] name = "conjecture" version = '0.4.0' authors = ["David R. MacIver "] homepage = "https://github.com/HypothesisWorks/hypothesis/tree/master/conjecture-rust" repository = "https://github.com/HypothesisWorks/hypothesis/" description = "Core engine for Hypothesis implementations" readme = "README.md" license = "MPL-2.0" [dependencies] rand = '0.3' crypto-hash = '0.3.1' byteorder = '1.2' [dev-dependencies] tempdir = "0.3" hypothesis-hypothesis-python-4.36.2/conjecture-rust/README.md000066400000000000000000000005741354103617500241470ustar00rootroot00000000000000# Conjecture for Rust Conjecture is the core engine for Hypothesis. This is a Rust implementation of it, designed to be shared between multiple library implementations. Currently only Hypothesis for Ruby uses it. It is unlikely that you want to use this library directly if you are not working on a Hypothesis implementation, and it exists primarily to share code between them. 
hypothesis-hypothesis-python-4.36.2/conjecture-rust/src/000077500000000000000000000000001354103617500234515ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/conjecture-rust/src/data.rs000066400000000000000000000130451354103617500247330ustar00rootroot00000000000000// Module representing core data types that Hypothesis // needs. use rand::{ChaChaRng, Rng}; use std::collections::HashSet; use std::cmp::Ordering; pub type DataStream = Vec; #[derive(Debug, Clone)] pub struct FailedDraw; #[derive(Debug, Clone)] enum BitGenerator { Random(ChaChaRng), Recorded(DataStream), } // Records information corresponding to a single draw call. #[derive(Debug, Clone)] pub struct DrawInProgress { depth: usize, start: usize, end: Option, } // Records information corresponding to a single draw call. #[derive(Debug, Clone)] pub struct Draw { pub depth: usize, pub start: usize, pub end: usize, } // Main entry point for running a test: // A test function takes a DataSource, uses it to // produce some data, and the DataSource records the // relevant information about what they did. 
#[derive(Debug, Clone)] pub struct DataSource { bitgenerator: BitGenerator, record: DataStream, sizes: Vec, draws: Vec, draw_stack: Vec, written_indices: HashSet, } impl DataSource { fn new(generator: BitGenerator) -> DataSource { return DataSource { bitgenerator: generator, record: DataStream::new(), sizes: Vec::new(), draws: Vec::new(), draw_stack: Vec::new(), written_indices: HashSet::new(), }; } pub fn from_random(random: ChaChaRng) -> DataSource { return DataSource::new(BitGenerator::Random(random)); } pub fn from_vec(record: DataStream) -> DataSource { return DataSource::new(BitGenerator::Recorded(record)); } pub fn start_draw(&mut self) { let i = self.draws.len(); let depth = self.draw_stack.len(); let start = self.record.len(); self.draw_stack.push(i); self.draws.push(DrawInProgress { start: start, end: None, depth: depth, }); } pub fn stop_draw(&mut self) { assert!(self.draws.len() > 0); assert!(self.draw_stack.len() > 0); let i = self.draw_stack.pop().unwrap(); let end = self.record.len(); self.draws[i].end = Some(end); } pub fn write(&mut self, value: u64) -> Result<(), FailedDraw> { match self.bitgenerator { BitGenerator::Recorded(ref mut v) if self.record.len() >= v.len() => Err(FailedDraw), _ => { self.sizes.push(0); self.record.push(value); Ok(()) } } } pub fn bits(&mut self, n_bits: u64) -> Result { self.sizes.push(n_bits); let mut result = match self.bitgenerator { BitGenerator::Random(ref mut random) => random.next_u64(), BitGenerator::Recorded(ref mut v) => if self.record.len() >= v.len() { return Err(FailedDraw); } else { v[self.record.len()] }, }; if n_bits < 64 { let mask = (1 << n_bits) - 1; result &= mask; }; self.record.push(result); return Ok(result); } pub fn to_result(mut self, status: Status) -> TestResult { TestResult { record: self.record, status: status, written_indices: self.written_indices, sizes: self.sizes, draws: self.draws .drain(..) 
.filter_map(|d| match d { DrawInProgress { depth, start, end: Some(end), } if start < end => { Some(Draw { start: start, end: end, depth: depth, }) } DrawInProgress { end: None, .. } => { assert!(status == Status::Invalid || status == Status::Overflow); None } _ => None, }) .collect(), } } } // Status indicates the result that we got from completing // a single test execution. #[derive(Debug, Clone, Eq, PartialEq, Copy)] pub enum Status { // The test tried to read more data than we had for it. Overflow, // Some important precondition of the test was not // satisfied. Invalid, // This test ran successfully to completion without // anything of note happening. Valid, // This was an interesting test execution! (Usually this // means failing, but for things like find it may not). Interesting(u64), } // Once a data source is finished it "decays" to a // TestResult, that retains a trace of all the information // we needed from the DataSource. It is these we keep around, // not the original DataSource objects. #[derive(Debug, Clone)] pub struct TestResult { pub record: DataStream, pub status: Status, pub draws: Vec, pub sizes: Vec, pub written_indices: HashSet, } impl Ord for TestResult { fn cmp(&self, other: &TestResult) -> Ordering { self.record.len().cmp(&other.record.len()). 
then(self.record.cmp(&other.record)) } } impl PartialOrd for TestResult { fn partial_cmp(&self, other: &TestResult) -> Option { Some(self.cmp(other)) } } impl PartialEq for TestResult { fn eq(&self, other: &TestResult) -> bool { self.record == other.record } } impl Eq for TestResult { } hypothesis-hypothesis-python-4.36.2/conjecture-rust/src/database.rs000066400000000000000000000101121354103617500255560ustar00rootroot00000000000000use crypto_hash::{hex_digest, Algorithm}; use std::fs; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::fmt::Debug; pub type Key = str; pub trait Database: Debug + Send { fn save(&mut self, key: &Key, value: &[u8]) -> (); fn delete(&mut self, key: &Key, value: &[u8]) -> (); fn fetch(&mut self, key: &Key) -> Vec>; } pub type BoxedDatabase = Box; #[derive(Debug)] pub struct NoDatabase; impl Database for NoDatabase { fn save(&mut self, _key: &Key, _value: &[u8]) -> () {} fn delete(&mut self, _key: &Key, _value: &[u8]) -> () {} fn fetch(&mut self, _key: &Key) -> Vec> { vec![] } } #[derive(Debug)] pub struct DirectoryDatabase { path: PathBuf, } fn expect_io_error(expected: io::ErrorKind, result: io::Result<()>) { match result { Ok(()) => (), Err(error) => { if error.kind() != expected { panic!("IO Error: {:?}", error.kind()); } } } } impl DirectoryDatabase { pub fn new>(path: P) -> DirectoryDatabase { let mut result = DirectoryDatabase { path: PathBuf::new(), }; result.path.push(path); result } fn path_for_key(&self, key: &Key) -> PathBuf { let hashed_key = hex_digest(Algorithm::SHA1, key.as_bytes()); let mut result = PathBuf::new(); result.push(&self.path); result.push(&hashed_key[0..7]); expect_io_error(io::ErrorKind::AlreadyExists, fs::create_dir_all(&result)); result } fn path_for_entry(&self, key: &Key, value: &[u8]) -> PathBuf { let mut result = self.path_for_key(key); result.push(&hex_digest(Algorithm::SHA1, value)[0..7]); result } } impl Database for DirectoryDatabase { fn save(&mut self, key: &Key, value: 
&[u8]) -> () { let mut target = fs::File::create(self.path_for_entry(key, &value)).unwrap(); target.write_all(value).unwrap(); target.sync_all().unwrap(); } fn delete(&mut self, key: &Key, value: &[u8]) -> () { let target = self.path_for_entry(key, &value); expect_io_error(io::ErrorKind::NotFound, fs::remove_file(target)); } fn fetch(&mut self, key: &Key) -> Vec> { let mut results = Vec::new(); for entry_result in fs::read_dir(self.path_for_key(key)).unwrap() { let path = entry_result.unwrap().path(); let file = fs::File::open(path).unwrap(); let mut buf_reader = io::BufReader::new(file); let mut contents = Vec::new(); buf_reader.read_to_end(&mut contents).unwrap(); results.push(contents); } results } } #[cfg(test)] mod tests { use super::*; use tempdir::TempDir; #[derive(Debug)] struct TestDatabase { _temp: TempDir, db: DirectoryDatabase, } impl TestDatabase { pub fn new() -> TestDatabase { let dir = TempDir::new("test-db").unwrap(); let db = DirectoryDatabase::new(dir.path()); TestDatabase { _temp: dir, db: db } } } impl Database for TestDatabase { fn save(&mut self, key: &Key, value: &[u8]) -> () { self.db.save(key, value) } fn delete(&mut self, key: &Key, value: &[u8]) -> () { self.db.delete(key, value) } fn fetch(&mut self, key: &Key) -> Vec> { self.db.fetch(key) } } #[test] fn can_delete_non_existing_key() { let mut db = TestDatabase::new(); db.delete("foo", b"bar"); } #[test] fn appears_in_listing_after_saving() { let mut db = TestDatabase::new(); db.save("foo", b"bar"); let results = db.fetch("foo"); assert!(results.len() == 1); assert!(results[0].as_slice() == b"bar"); } #[test] fn can_delete_key() { let mut db = TestDatabase::new(); db.save("foo", b"bar"); db.delete("foo", b"bar"); let results = db.fetch("foo"); assert!(results.len() == 0); } } hypothesis-hypothesis-python-4.36.2/conjecture-rust/src/distributions.rs000066400000000000000000000163441354103617500267310ustar00rootroot00000000000000use data::{DataSource, FailedDraw}; use std::cmp::{Ord, 
Ordering, PartialOrd, Reverse}; use std::collections::BinaryHeap; use std::mem; use std::u64::MAX as MAX64; type Draw = Result; pub fn weighted(source: &mut DataSource, probability: f64) -> Result { // TODO: Less bit-hungry implementation. let truthy = (probability * (u64::max_value() as f64 + 1.0)).floor() as u64; let probe = source.bits(64)?; Ok(match (truthy, probe) { (0, _) => false, (MAX64, _) => true, (_, 0) => false, (_, 1) => true, _ => probe >= MAX64 - truthy, }) } pub fn bounded_int(source: &mut DataSource, max: u64) -> Draw { let bitlength = 64 - max.leading_zeros() as u64; if bitlength == 0 { source.write(0)?; return Ok(0); } loop { let probe = source.bits(bitlength)?; if probe <= max { return Ok(probe); } } } #[derive(Debug, Clone)] pub struct Repeat { min_count: u64, max_count: u64, p_continue: f64, current_count: u64, } impl Repeat { pub fn new(min_count: u64, max_count: u64, expected_count: f64) -> Repeat { Repeat { min_count: min_count, max_count: max_count, p_continue: 1.0 - 1.0 / (1.0 + expected_count), current_count: 0, } } pub fn reject(&mut self) { assert!(self.current_count > 0); self.current_count -= 1; } pub fn should_continue(&mut self, source: &mut DataSource) -> Result { if self.min_count == self.max_count { if self.current_count < self.max_count { self.current_count += 1; return Ok(true); } else { return Ok(false); } } else if self.current_count < self.min_count { source.write(1)?; self.current_count += 1; return Ok(true); } else if self.current_count >= self.max_count { source.write(0)?; return Ok(false); } let result = weighted(source, self.p_continue)?; if result { self.current_count += 1; } else { } return Ok(result); } } #[derive(Debug, Clone)] struct SamplerEntry { primary: usize, alternate: usize, use_alternate: f32, } impl SamplerEntry { fn single(i: usize) -> SamplerEntry { SamplerEntry { primary: i, alternate: i, use_alternate: 0.0, } } } impl Ord for SamplerEntry { fn cmp(&self, other: &SamplerEntry) -> Ordering { return 
self.primary .cmp(&other.primary) .then(self.alternate.cmp(&other.alternate)); } } impl PartialOrd for SamplerEntry { fn partial_cmp(&self, other: &SamplerEntry) -> Option { return Some(self.cmp(other)); } } impl PartialEq for SamplerEntry { fn eq(&self, other: &SamplerEntry) -> bool { return self.cmp(other) == Ordering::Equal; } } impl Eq for SamplerEntry {} #[derive(Debug, Clone)] pub struct Sampler { table: Vec, } impl Sampler { pub fn new(weights: Vec) -> Sampler { // FIXME: The correct thing to do here is to allow this, // return early, and make this reject the data, but we don't // currently have the status built into our data properly... assert!(weights.len() > 0); let mut table = Vec::new(); let mut small = BinaryHeap::new(); let mut large = BinaryHeap::new(); let total: f32 = weights.iter().sum(); let mut scaled_probabilities = Vec::new(); let n = weights.len() as f32; for (i, w) in weights.iter().enumerate() { let scaled = n * w / total; scaled_probabilities.push(scaled); if scaled == 1.0 { table.push(SamplerEntry::single(i)) } else if scaled > 1.0 { large.push(Reverse(i)); } else { assert!(scaled < 1.0); small.push(Reverse(i)); } } while !(small.is_empty() || large.is_empty()) { let Reverse(lo) = small.pop().unwrap(); let Reverse(hi) = large.pop().unwrap(); assert!(lo != hi); assert!(scaled_probabilities[hi] > 1.0); assert!(scaled_probabilities[lo] < 1.0); scaled_probabilities[hi] = (scaled_probabilities[hi] + scaled_probabilities[lo]) - 1.0; table.push(SamplerEntry { primary: lo, alternate: hi, use_alternate: 1.0 - scaled_probabilities[lo], }); if scaled_probabilities[hi] < 1.0 { small.push(Reverse(hi)) } else if scaled_probabilities[hi] > 1.0 { large.push(Reverse(hi)) } else { table.push(SamplerEntry::single(hi)) } } for &Reverse(i) in small.iter() { table.push(SamplerEntry::single(i)) } for &Reverse(i) in large.iter() { table.push(SamplerEntry::single(i)) } for ref mut entry in table.iter_mut() { if entry.alternate < entry.primary { mem::swap(&mut 
entry.primary, &mut entry.alternate); entry.use_alternate = 1.0 - entry.use_alternate; } } table.sort(); assert!(table.len() > 0); return Sampler { table: table }; } pub fn sample(&self, source: &mut DataSource) -> Draw { assert!(self.table.len() > 0); let i = bounded_int(source, self.table.len() as u64 - 1)? as usize; let entry = &self.table[i]; let use_alternate = weighted(source, entry.use_alternate as f64)?; if use_alternate { Ok(entry.alternate) } else { Ok(entry.primary) } } } pub fn good_bitlengths() -> Sampler { let weights = vec![ 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, // 1 byte 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, // 2 bytes 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, // 3 bytes 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, // 4 bytes 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, // 5 bytes 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, // 6 bytes 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, // 7 bytes 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, // 8 bytes (last bit spare for sign) ]; assert!(weights.len() == 63); Sampler::new(weights) } pub fn integer_from_bitlengths(source: &mut DataSource, bitlengths: &Sampler) -> Draw { let bitlength = bitlengths.sample(source)? as u64 + 1; let base = source.bits(bitlength)? as i64; let sign = source.bits(1)?; if sign > 0 { Ok(-base) } else { Ok(base) } } hypothesis-hypothesis-python-4.36.2/conjecture-rust/src/engine.rs000066400000000000000000000640501354103617500252710ustar00rootroot00000000000000// Core module that provides a main execution loop and // the API that can be used to get test data from it. 
use rand::{ChaChaRng, Rng, SeedableRng}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use std::cmp::Reverse; use std::collections::{HashMap, HashSet}; use std::mem; use std::sync::mpsc::{sync_channel, Receiver, SyncSender}; use std::thread; use std::io; use data::{DataSource, DataStream, Status, TestResult}; use database::BoxedDatabase; use intminimize::minimize_integer; #[derive(Debug, Clone)] enum LoopExitReason { Complete, MaxExamples, Shutdown, } #[derive(Debug)] enum LoopCommand { RunThis(DataSource), Finished(LoopExitReason, MainGenerationLoop), } #[derive(Debug)] struct MainGenerationLoop { name: String, database: BoxedDatabase, receiver: Receiver, sender: SyncSender, max_examples: u64, random: ChaChaRng, best_example: Option, minimized_examples: HashMap, fully_minimized: HashSet, valid_examples: u64, invalid_examples: u64, interesting_examples: u64, } type StepResult = Result<(), LoopExitReason>; impl MainGenerationLoop { fn run(mut self) { let result = self.loop_body(); match result { // Silent shutdown when the main thread terminates Err(LoopExitReason::Shutdown) => (), Err(reason) => { // Must clone because otherwise it is borrowed. 
let shutdown_sender = self.sender.clone(); shutdown_sender .send(LoopCommand::Finished(reason, self)) .unwrap() } Ok(_) => panic!("BUG: Generation loop was not supposed to return normally."), } } fn run_previous_examples(&mut self) -> Result<(), LoopExitReason>{ for v in self.database.fetch(&self.name) { let result = self.execute(DataSource::from_vec(bytes_to_u64s(&v)))?; let should_delete = match &result.status { Status::Interesting(_) => u64s_to_bytes(&result.record) != v , _ => true }; if should_delete { println!("Deleting!"); self.database.delete(&self.name, v.as_slice()); } } Ok(()) } fn loop_body(&mut self) -> StepResult { self.run_previous_examples()?; if self.interesting_examples == 0 { self.generate_examples()?; } // At the start of this loop we usually only have one example in // self.minimized_examples, but as we shrink we may find other ones. // Additionally, we may have multiple different failing examples from // a previous run. // // The reason why we loop is twofold: // a) This allows us to include newly discovered examples. Labels that // are not found in self.minimized_examples at the beginning of the // loop will be added for the next iteration around. // b) If we've previously marked a label as finished it can become // unfinished again if when shrinking another label, as when trying // to shrink one label we might accidentally find an improved shrink // for another. // // In principle this might cause us to loop for a very long time before // eventually settling on a fixed point, but when that happens we // should hit limits on shrinking (which we haven't implemented yet). 
while self.minimized_examples.len() > self.fully_minimized.len() { let keys: Vec = self.minimized_examples.keys().map(|i| *i).collect(); for label in keys.iter() { if self.fully_minimized.insert(*label) { let target = self.minimized_examples[label].clone(); let mut shrinker = Shrinker::new( self, target, |r| { r.status == Status::Interesting(*label) }); shrinker.run()?; } } } return Err(LoopExitReason::Complete); } fn generate_examples(&mut self) -> Result { while self.valid_examples < self.max_examples && self.invalid_examples < 10 * self.max_examples { let r = self.random.gen(); let result = self.execute(DataSource::from_random(r))?; match result.status { Status::Interesting(_) => return Ok(result), _ => (), } } return Err(LoopExitReason::MaxExamples); } fn execute(&mut self, source: DataSource) -> Result { let result = match self.sender.send(LoopCommand::RunThis(source)) { Ok(_) => match self.receiver.recv() { Ok(t) => t, Err(_) => return Err(LoopExitReason::Shutdown), }, Err(_) => return Err(LoopExitReason::Shutdown), }; match result.status { Status::Overflow => (), Status::Invalid => self.invalid_examples += 1, Status::Valid => self.valid_examples += 1, Status::Interesting(n) => { self.best_example = Some(result.clone()); let mut changed = false; let mut minimized_examples = &mut self.minimized_examples; let mut database = &mut self.database; let name = &self.name; minimized_examples.entry(n).or_insert_with(|| {result.clone()}); minimized_examples.entry(n).and_modify(|e| { if result < *e { changed = true; database.delete(name, &u64s_to_bytes(&(*e.record))); *e = result.clone() }; }); if changed { self.fully_minimized.remove(&n); } self.interesting_examples += 1; database.save(&self.name, &u64s_to_bytes(result.record.as_slice())); } } Ok(result) } } struct Shrinker<'owner, Predicate> { _predicate: Predicate, shrink_target: TestResult, changes: u64, expensive_passes_enabled: bool, main_loop: &'owner mut MainGenerationLoop, } impl<'owner, Predicate> 
Shrinker<'owner, Predicate> where Predicate: Fn(&TestResult) -> bool, { fn new( main_loop: &'owner mut MainGenerationLoop, shrink_target: TestResult, predicate: Predicate, ) -> Shrinker<'owner, Predicate> { assert!(predicate(&shrink_target)); Shrinker { main_loop: main_loop, _predicate: predicate, shrink_target: shrink_target, changes: 0, expensive_passes_enabled: false, } } fn predicate(&mut self, result: &TestResult) -> bool { let succeeded = (self._predicate)(result); if succeeded && ( // In the presence of writes it may be the case that we thought // we were going to shrink this but didn't actually succeed because // the written value was used. result.record.len() < self.shrink_target.record.len() || ( result.record.len() == self.shrink_target.record.len() && result.record < self.shrink_target.record ) ) { self.changes += 1; self.shrink_target = result.clone(); } succeeded } fn run(&mut self) -> StepResult { let mut prev = self.changes + 1; while prev != self.changes { prev = self.changes; self.adaptive_delete()?; self.minimize_individual_blocks()?; self.minimize_duplicated_blocks()?; if prev == self.changes { self.expensive_passes_enabled = true; } if !self.expensive_passes_enabled { continue; } self.reorder_blocks()?; self.lower_and_delete()?; self.delete_all_ranges()?; } Ok(()) } fn lower_and_delete(&mut self) -> StepResult { let mut i = 0; while i < self.shrink_target.record.len() { if self.shrink_target.record[i] > 0 { let mut attempt = self.shrink_target.record.clone(); attempt[i] -= 1; let (succeeded, result) = self.execute(&attempt)?; if !succeeded && result.record.len() < self.shrink_target.record.len() { let mut j = 0; while j < self.shrink_target.draws.len() { // Having to copy this is an annoying consequence of lexical lifetimes - // if we borrowed it immutably then we'd not be allowed to call self.incorporate // down below. Fortunately these things are tiny structs of integers so it doesn't // really matter. 
let d = self.shrink_target.draws[j].clone(); if d.start > i { let mut attempt2 = attempt.clone(); attempt2.drain(d.start..d.end); if self.incorporate(&attempt2)? { break; } } j += 1; } } } i += 1; } Ok(()) } fn reorder_blocks(&mut self) -> StepResult { let mut i = 0; while i < self.shrink_target.record.len() { let mut j = i + 1; while j < self.shrink_target.record.len() { assert!(i < self.shrink_target.record.len()); if self.shrink_target.record[i] == 0 { break; } if self.shrink_target.record[j] < self.shrink_target.record[i] { let mut attempt = self.shrink_target.record.clone(); attempt.swap(i, j); self.incorporate(&attempt)?; } j += 1; } i += 1; } Ok(()) } fn try_delete_range( &mut self, target: &TestResult, i: usize, k: usize, ) -> Result { // Attempts to delete k non-overlapping draws starting from the draw at index i. let mut stack: Vec<(usize, usize)> = Vec::new(); let mut j = i; while j < target.draws.len() && stack.len() < k { let m = target.draws[j].start; let n = target.draws[j].end; assert!(m < n); if m < n && (stack.len() == 0 || stack[stack.len() - 1].1 <= m) { stack.push((m, n)) } j += 1; } let mut attempt = target.record.clone(); while stack.len() > 0 { let (m, n) = stack.pop().unwrap(); attempt.drain(m..n); } if attempt.len() >= self.shrink_target.record.len() { Ok(false) } else { self.incorporate(&attempt) } } fn adaptive_delete(&mut self) -> StepResult { let mut i = 0; let target = self.shrink_target.clone(); while i < target.draws.len() { // This is an adaptive pass loosely modelled after timsort. If // little or nothing is deletable here then we don't try any more // deletions than the naive greedy algorithm would, but if it looks // like we have an opportunity to delete a lot then we try to do so. // What we're trying to do is to find a large k such that we can // delete k but not k + 1 draws starting from this point, and we // want to do that in O(log(k)) rather than O(k) test executions. 
// We try a quite careful sequence of small shrinks here before we // move on to anything big. This is because if we try to be // aggressive too early on we'll tend to find that we lose out when // the example is "nearly minimal". if self.try_delete_range(&target, i, 2)? { if self.try_delete_range(&target, i, 3)? && self.try_delete_range(&target, i, 4)? { let mut hi = 5; // At this point it looks like we've got a pretty good // opportunity for a long run here. We do an exponential // probe upwards to try and find some k where we can't // delete many intervals. We do this rather than choosing // that upper bound to immediately be large because we // don't really expect k to be huge. If it turns out that // it is, the subsequent example is going to be so tiny that // it doesn't really matter if we waste a bit of extra time // here. while self.try_delete_range(&target, i, hi)? { assert!(hi <= target.draws.len()); hi *= 2; } // We now know that we can delete the first lo intervals but // not the first hi. We preserve that property while doing // a binary search to find the point at which we stop being // able to delete intervals. let mut lo = 4; while lo + 1 < hi { let mid = lo + (hi - lo) / 2; if self.try_delete_range(&target, i, mid)? { lo = mid; } else { hi = mid; } } } } else { self.try_delete_range(&target, i, 1)?; } // We unconditionally bump i because we have always tried deleting // one more example than we succeeded at deleting, so we expect the // next example to be undeletable. 
i += 1; } return Ok(()); } fn delete_all_ranges(&mut self) -> StepResult { let mut i = 0; while i < self.shrink_target.record.len() { let start_length = self.shrink_target.record.len(); let mut j = i + 1; while j < self.shrink_target.record.len() { assert!(j > i); let mut attempt = self.shrink_target.record.clone(); attempt.drain(i..j); assert!(attempt.len() + (j - i) == self.shrink_target.record.len()); let deleted = self.incorporate(&attempt)?; if !deleted { j += 1; } } if start_length == self.shrink_target.record.len() { i += 1; } } Ok(()) } fn try_lowering_value(&mut self, i: usize, v: u64) -> Result { if v >= self.shrink_target.record[i] { return Ok(false); } let mut attempt = self.shrink_target.record.clone(); attempt[i] = v; let (succeeded, result) = self.execute(&attempt)?; assert!(result.record.len() <= self.shrink_target.record.len()); let lost_bytes = self.shrink_target.record.len() - result.record.len(); if !succeeded && result.status == Status::Valid && lost_bytes > 0 { attempt.drain(i + 1..i + lost_bytes + 1); assert!(attempt.len() + lost_bytes == self.shrink_target.record.len()); self.incorporate(&attempt) } else { Ok(succeeded) } } fn minimize_individual_blocks(&mut self) -> StepResult { let mut i = 0; while i < self.shrink_target.record.len() { if !self.shrink_target.written_indices.contains(&i) { minimize_integer(self.shrink_target.record[i], |v| { self.try_lowering_value(i, v) })?; } i += 1; } Ok(()) } fn calc_duplicates(&self) -> Vec> { assert!(self.shrink_target.record.len() == self.shrink_target.sizes.len()); let mut duplicates: HashMap<(u64, u64), Vec> = HashMap::new(); for (i, (u, v)) in self.shrink_target .record .iter() .zip(self.shrink_target.sizes.iter()) .enumerate() { if !self.shrink_target.written_indices.contains(&i) { duplicates .entry((*u, *v)) .or_insert_with(|| Vec::new()) .push(i); } } let mut result: Vec> = duplicates .drain() .filter_map(|(_, elements)| { if elements.len() > 1 { Some(elements) } else { None } }) .collect(); 
result.sort_by_key(|v| Reverse(v.len())); result } fn minimize_duplicated_blocks(&mut self) -> StepResult { let mut i = 0; let mut targets = self.calc_duplicates(); while i < targets.len() { let target = mem::replace(&mut targets[i], Vec::new()); let max_target = *target.iter().max().unwrap(); i += 1; assert!(target.len() > 0); let v = self.shrink_target.record[target[0]]; let w = minimize_integer(v, |t| { if max_target >= self.shrink_target.record.len() { return Ok(false); } let mut attempt = self.shrink_target.record.clone(); for i in &target { attempt[*i] = t } self.incorporate(&attempt) })?; if w != v { targets = self.calc_duplicates(); } } Ok(()) } fn execute(&mut self, buf: &DataStream) -> Result<(bool, TestResult), LoopExitReason> { // TODO: Later there will be caching here let result = self.main_loop.execute(DataSource::from_vec(buf.clone()))?; Ok((self.predicate(&result), result)) } fn incorporate(&mut self, buf: &DataStream) -> Result { assert!( buf.len() <= self.shrink_target.record.len(), "Expected incorporate to not increase length, but buf.len() = {} \ while shrink target was {}", buf.len(), self.shrink_target.record.len() ); if buf.len() == self.shrink_target.record.len() { assert!(buf < &self.shrink_target.record); } if self.shrink_target.record.starts_with(buf) { return Ok(false); } let (succeeded, _) = self.execute(buf)?; Ok(succeeded) } } #[derive(Debug, Clone, Eq, PartialEq)] enum EngineState { AwaitingCompletion, ReadyToProvide, } #[derive(Debug)] pub struct Engine { // The next response from the main loop. Once // this is set to Some(Finished(_)) it stays that way, // otherwise it is cleared on access. 
loop_response: Option, state: EngineState, // Communication channels with the main testing loop handle: Option>, receiver: Receiver, sender: SyncSender, } impl Clone for Engine { fn clone(&self) -> Engine { panic!("BUG: The Engine was unexpectedly cloned"); } } fn bytes_to_u64s(bytes: &[u8]) -> Vec{ let mut reader = io::Cursor::new(bytes); let mut result = Vec::new(); while let Ok(n) = reader.read_u64::() { result.push(n); } result } fn u64s_to_bytes(ints: &[u64]) -> Vec{ let mut result = Vec::new(); for n in ints { result.write_u64::(*n).unwrap(); } result } impl Engine { pub fn new(name: String, max_examples: u64, seed: &[u32], db: BoxedDatabase) -> Engine { let (send_local, recv_remote) = sync_channel(1); let (send_remote, recv_local) = sync_channel(1); let main_loop = MainGenerationLoop { database: db, name: name, max_examples: max_examples, random: ChaChaRng::from_seed(seed), sender: send_remote, receiver: recv_remote, best_example: None, minimized_examples: HashMap::new(), fully_minimized: HashSet::new(), valid_examples: 0, invalid_examples: 0, interesting_examples: 0, }; let handle = thread::Builder::new() .name("Hypothesis main loop".to_string()) .spawn(move || { main_loop.run(); }) .unwrap(); Engine { loop_response: None, sender: send_local, receiver: recv_local, handle: Some(handle), state: EngineState::ReadyToProvide, } } pub fn mark_finished(&mut self, source: DataSource, status: Status) -> () { self.consume_test_result(source.to_result(status)) } pub fn next_source(&mut self) -> Option { assert!(self.state == EngineState::ReadyToProvide); self.state = EngineState::AwaitingCompletion; self.await_loop_response(); let mut local_result = None; mem::swap(&mut local_result, &mut self.loop_response); match local_result { Some(LoopCommand::RunThis(source)) => return Some(source), None => panic!("BUG: Loop response should not be empty at this point"), _ => { self.loop_response = local_result; return None; } } } pub fn list_minimized_examples(&self) -> Vec { 
match &self.loop_response { &Some(LoopCommand::Finished( _, MainGenerationLoop { ref minimized_examples, .. }, )) => { let mut results: Vec = minimized_examples.values().map(|v| v.clone()).collect(); results.sort(); results }, _ => Vec::new(), } } pub fn best_source(&self) -> Option { match &self.loop_response { &Some(LoopCommand::Finished( _, MainGenerationLoop { best_example: Some(ref result), .. }, )) => Some(DataSource::from_vec(result.record.clone())), _ => None, } } fn consume_test_result(&mut self, result: TestResult) -> () { assert!(self.state == EngineState::AwaitingCompletion); self.state = EngineState::ReadyToProvide; if self.has_shutdown() { return (); } // NB: Deliberately not matching on result. If this fails, // that's OK - it means the loop has shut down and when we ask // for data from it we'll get its shutdown response. let _ = self.sender.send(result); } pub fn was_unsatisfiable(&self) -> bool { match &self.loop_response { &Some(LoopCommand::Finished(_, ref main_loop)) => { main_loop.interesting_examples == 0 && main_loop.valid_examples == 0 } _ => false, } } fn has_shutdown(&mut self) -> bool { match &self.loop_response { &Some(LoopCommand::Finished(..)) => true, _ => false, } } fn await_thread_termination(&mut self) { let mut maybe_handle = None; mem::swap(&mut self.handle, &mut maybe_handle); if let Some(handle) = maybe_handle { if let Err(boxed_msg) = handle.join() { // FIXME: This is awful but as far as I can tell this is // genuinely the only way to get the actual message out of the // panic in the child thread! It's boxed as an Any, and the // debug of Any just says "Any". Fortunately the main loop is // very much under our control so this doesn't matter too much // here, but yuck! 
if let Some(msg) = boxed_msg.downcast_ref::<&str>() { panic!(msg.to_string()); } else if let Some(msg) = boxed_msg.downcast_ref::() { panic!(msg.clone()); } else { panic!("BUG: Unexpected panic format in main loop"); } } } } fn await_loop_response(&mut self) -> () { if self.loop_response.is_none() { match self.receiver.recv() { Ok(response) => { self.loop_response = Some(response); if self.has_shutdown() { self.await_thread_termination(); } } Err(_) => { self.await_thread_termination(); panic!("BUG: Unexpected silent termination of generation loop.") } } } } } #[cfg(test)] mod tests { use super::*; use data::FailedDraw; use database::NoDatabase; fn run_to_results(mut f: F) -> Vec where F: FnMut(&mut DataSource) -> Result { let seed: [u32; 2] = [0, 0]; let mut engine = Engine::new( "run_to_results".to_string(), 1000, &seed, Box::new(NoDatabase), ); while let Some(mut source) = engine.next_source() { if let Ok(status) = f(&mut source) { engine.mark_finished(source, status); } else { engine.mark_finished(source, Status::Overflow); } } engine.list_minimized_examples() } #[test] fn minimizes_all_examples(){ let results = run_to_results(|source| { let n = source.bits(64)?; if n >= 100 { Ok(Status::Interesting(n % 2)) } else { Ok(Status::Valid) } }); assert!(results.len() == 2); assert_eq!(results[0].record[0], 100); assert_eq!(results[1].record[0], 101); } } hypothesis-hypothesis-python-4.36.2/conjecture-rust/src/intminimize.rs000066400000000000000000000061561354103617500263630ustar00rootroot00000000000000use std::cmp::min; const SMALL: u64 = 5; struct Minimizer<'a, F: 'a> { criterion: &'a mut F, best: u64, } impl<'a, F, T> Minimizer<'a, F> where F: 'a + FnMut(u64) -> Result, { fn test(&mut self, candidate: u64) -> Result { if candidate == self.best { return Ok(true); } if candidate > self.best { return Ok(false); } let result = (self.criterion)(candidate)?; if result { self.best = candidate; } Ok(result) } fn modify(&mut self, g: G) -> Result where G: Fn(u64) -> u64, { 
let x = g(self.best); self.test(x) } } pub fn minimize_integer(start: u64, mut criterion: F) -> Result where F: FnMut(u64) -> Result, { if start == 0 { return Ok(start); } for i in 0..min(start, SMALL) { if criterion(i)? { return Ok(i); } } if start <= SMALL { return Ok(start); } let mut minimizer = Minimizer { best: start, criterion: &mut criterion, }; loop { if !minimizer.modify(|x| x >> 1)? { break; } } for i in 0..64 { minimizer.modify(|x| x ^ (1 << i))?; } assert!(minimizer.best >= SMALL); for i in 0..64 { let left_mask = 1 << i; let mut right_mask = left_mask >> 1; while right_mask != 0 { minimizer.modify(|x| { if x & left_mask == 0 || x & right_mask != 0 { x } else { x ^ (right_mask | left_mask) } })?; right_mask >>= 1; } } if !minimizer.modify(|x| x - 1)? { return Ok(minimizer.best); } let mut lo = 0; let mut hi = minimizer.best; while lo + 1 < hi { let mid = lo + (hi - lo) / 2; if minimizer.test(mid)? { hi = mid; } else { lo = mid; } } Ok(minimizer.best) } #[cfg(test)] mod tests { use super::*; fn non_failing_minimize(start: u64, criterion: F) -> u64 where F: Fn(u64) -> bool, { let mut best = start; loop { let ran: Result = minimize_integer(best, |x| Ok(criterion(x))); let result = ran.unwrap(); assert!(result <= best); if result == best { return best; } best = result; } } #[test] fn minimize_down_to() { let n = non_failing_minimize(100, |x| x >= 10); assert_eq!(n, 10); } #[test] fn unset_relevant_bits() { let x = 0b101010101010; let y = 0b111111111111; let n = non_failing_minimize(y, |k| k & x == x); assert_eq!(n, x); } #[test] fn sort_bits() { let x: u64 = 0b1011011011000111; let y: u64 = 0b0000001111111111; let c = x.count_ones(); assert_eq!(c, y.count_ones()); let n = non_failing_minimize(x, |k| k.count_ones() == c); assert_eq!(y, n); } } hypothesis-hypothesis-python-4.36.2/conjecture-rust/src/lib.rs000066400000000000000000000003311354103617500245620ustar00rootroot00000000000000extern crate core; extern crate crypto_hash; extern crate byteorder; extern 
crate rand; #[cfg(test)] extern crate tempdir; pub mod data; pub mod database; pub mod distributions; pub mod engine; pub mod intminimize; hypothesis-hypothesis-python-4.36.2/guides/000077500000000000000000000000001354103617500210065ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/guides/README.rst000066400000000000000000000010111354103617500224660ustar00rootroot00000000000000================================= Guides for Hypothesis Development ================================= This is a general collection of useful documentation for people working on Hypothesis. It is separate from the main documentation because it is not much use if you are merely *using* Hypothesis. It's purely for working on it, and aimed more at maintainers than casual contributors. Most of these are currently written with the Python version of Hypothesis in mind, but will evolve over time to be more multilingual. hypothesis-hypothesis-python-4.36.2/guides/api-style.rst000066400000000000000000000177151354103617500234620ustar00rootroot00000000000000=============== House API Style =============== Note: Currently this guide is very specific to the *Python* version of Hypothesis. It needs updating for the Ruby version (and, in future, other versions). Here are some guidelines for how to write APIs so that they "feel" like a Hypothesis API. This is particularly focused on writing new strategies, as that's the major place where we add APIs, but also applies more generally. Note that it is not a guide to *code* style, only API design. The Hypothesis style evolves over time, and earlier strategies in particular may not be consistent with this style, and we've tried some experiments that didn't work out, so this style guide is more normative than descriptive and existing APIs may not match it. Where relevant, backwards compatibility is much more important than conformance to the style. 
~~~~~~~~~~~~~~~~~~ General Guidelines ~~~~~~~~~~~~~~~~~~ * When writing extras modules, consistency with Hypothesis trumps consistency with the library you're integrating with. * *Absolutely no subclassing as part of the public API* * We should not strive too hard to be pythonic, but if an API seems weird to a normal Python user we should see if we can come up with an API we like as much but is less weird. * Code which adds a dependency on a third party package should be put in a hypothesis.extra module. * Complexity should not be pushed onto the user. An easy to use API is more important than a simple implementation. ~~~~~~~~~~~~~~~~~~~~~~~~~ Guidelines for strategies ~~~~~~~~~~~~~~~~~~~~~~~~~ * A strategy function should be somewhere between a recipe for how to build a value and a range of valid values. * It should not include distribution hints. The arguments should only specify how to produce a valid value, not statistical properties of values. * Strategies should try to paper over non-uniformity in the underlying types as much as possible (e.g. ``hypothesis.extra.numpy`` has a number of workarounds for numpy's odd behaviour around object arrays). ~~~~~~~~~~~~~~~~~ Argument handling ~~~~~~~~~~~~~~~~~ We have a reasonably distinctive style when it comes to handling arguments: * Arguments must be validated to the greatest extent possible. Hypothesis should reject bad arguments with an InvalidArgument error, not fail with an internal exception. * We make extensive use of default arguments. If an argument could reasonably have a default, it should. * Exception to the above: Strategies for collection types should *not* have a default argument for element strategies. * Interacting arguments (e.g. arguments that must be in a particular order, or where at most one is valid, or where one argument restricts the valid range of the other) are fine, but when this happens the behaviour of defaults should automatically be adjusted. e.g. 
if the normal default of an argument would become invalid, the function should still do the right thing if that default is used. * Where the actual default used depends on other arguments, the default parameter should be None. * It's worth thinking about the order of arguments: the first one or two arguments are likely to be passed positionally, so try to put values there where this is useful and not too confusing. * When adding arguments to strategies, think carefully about whether the user is likely to want that value to vary often. If so, make it a strategy instead of a value. In particular if it's likely to be common that they would want to write ``some_strategy.flatmap(lambda x: my_new_strategy(argument=x))`` then it should be a strategy. * Arguments should not be "a value or a strategy for generating that value". If you find yourself inclined to write something like that, instead make it take a strategy. If a user wants to pass a value they can wrap it in a call to ``just``. * If a combination of arguments make it impossible to generate anything, ``raise InvalidArgument`` instead of ``return nothing()``. Returning the null strategy is conceptually nice, but can lead to silently dropping parts from composed strategies and thus unexpectedly weak tests. ~~~~~~~~~~~~~~ Function Names ~~~~~~~~~~~~~~ We don't have any real consistency here. The rough approach we follow is: * Names are `snake_case` as is standard in Python. * Strategies for a particular type are typically named as a plural name for that type. Where that type has some truncated form (e.g. int, str) we use a longer form name. * Other strategies have no particular common naming convention. ~~~~~~~~~~~~~~ Argument Names ~~~~~~~~~~~~~~ We should try to use the same argument names and orders across different strategies wherever possible. In particular: * For collection types, the element strategy (or strategies) should always be the first arguments. 
Where there is only one element strategy it should be called ``elements`` (but e.g. ``dictionaries`` has element strategies named ``keys`` and ``values`` and that's fine). * For ordered types, the first two arguments should be a lower and an upper bound. They should be called ``min_value`` and ``max_value``. * Collection types should have a ``min_size`` and a ``max_size`` parameter that controls the range of their size. ``min_size`` should default to zero and ``max_size`` to ``None`` (even if internally it is bounded). ~~~~~~~~~~~~~~~ Deferred Errors ~~~~~~~~~~~~~~~ As far as is reasonable, functions should raise errors when the test is run (typically by deferring them until you try to draw from the strategy), not when they are called. This mostly applies to strategy functions and some error conditions in ``@given`` itself. Generally speaking this should be taken care of automatically by use of the ``@defines_strategy`` decorator. We do not currently do this for the ``TypeError`` that you will get from calling the function incorrectly (e.g. with invalid keyword arguments or missing required arguments). In principle we could, but it would result in much harder to read function signatures, so we would be trading off one form of comprehensibility for another, and so far that hasn't seemed to be worth it. The main reasons for preferring this style are: * Errors at test import time tend to throw people and be correspondingly hard for them to debug. There's an expectation that errors in your test code result in failures in your tests, and the fact that that test code happens to be defined in a decorator doesn't seem to change that expectation for people. * Things like deprecation warnings etc. localize better when they happen inside the test - test runners will often swallow them or put them in silly places if they're at import time, but will attach any output that happens in the test to the test itself. 
* There are a lot of cases where raising an error, deprecation warning, etc. is *only* possible in a test - e.g. if you're using the inline style with `data `_, or if you're using `flatmap `_ or `@composite `_ then the strategy won't actually get evaluated until we run the test, so that's the only place they can happen. It's nice to be consistent, and it's weird if sometimes strategy errors result in definition time errors and sometimes they result in test errors. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A catalogue of current violations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following are places where we currently deviate from this style. Some of these should be considered targets for deprecation and/or improvement. * ``hypothesis.extra.numpy`` has some arguments which can be either strategies or values. * ``hypothesis.extra.numpy`` assumes arrays are fixed size and doesn't have ``min_size`` and ``max_size`` arguments (but this is probably OK because of more complicated shapes of array). * ``hypothesis.stateful`` is a great big subclassing based train wreck. hypothesis-hypothesis-python-4.36.2/guides/documentation.rst000066400000000000000000000103221354103617500244070ustar00rootroot00000000000000===================================== The Hypothesis Documentation Handbook ===================================== Note: Currently this guide is very specific to the *Python* version of Hypothesis. It needs updating for the Ruby version (and, in future, other versions). Good documentation can make the difference between good code and useful code - and Hypothesis is written to be used, as widely as possible. This is a working document-in-progress with some tips for how we try to write our docs, with a little of the what and a bigger chunk of the how. If you have ideas about how to improve these suggestions, meta issues or pull requests are just as welcome as for docs or code :D ---------------------------- What docs should be written? 
---------------------------- All public APIs should be comprehensively described. If the docs are confusing to new users, incorrect or out of date, or simply incomplete - we consider all of those to be bugs; if you see them please raise an issue and perhaps submit a pull request. That's not much advice, but it's what we have so far. ------------ Using Sphinx ------------ We use `the Sphinx documentation system `_ to convert the .rst files into html with formatting and cross-references. Without repeating the docs for Sphinx, here are some tips: - When documenting a Python object (function, class, module, etc.), you can use autodoc to insert and interpret the docstring. - When referencing a function, you can insert a reference to a function as (eg) ``:func:`hypothesis.given`\``, which will appear as ``hypothesis.given()`` with a hyperlink to the appropriate docs. You can show only the last part (unqualified name) by adding a tilde at the start, like ``:func:`~hypothesis.given`\ `` -> ``given()``. Finally, you can give it alternative link text in the usual way: ``:func:`other text `\ `` -> ``other text``. - For the formatting and also hyperlinks, all cross-references should use the Sphinx cross-referencing syntax rather than plain text. ----------------- Changelog Entries ----------------- `Hypothesis does continuous deployment `_, where every pull request that touches ``./src`` results in a new release. That means every contributor gets to write their changelog! A changelog entry should be written in a new ``RELEASE.rst`` file in the `hypothesis-python` directory (other projects will have a release file in their corresponding directory, but the system is currently not functioning for `hypothesis-ruby`, which is the only other such project at the moment). The first line of the file specifies the component of the version number that will be updated, according to our `semantic versioning `_ policy. 
- ``RELEASE_TYPE: major`` is for breaking changes, and will only be used by the core team after extensive discussion. - ``RELEASE_TYPE: minor`` is for anything that adds to the public (ie documented) API, changes an argument signature, or adds a new deprecation or health check. Minor (or patch) releases **must not** cause errors in any code that runs without errors on an earlier version of Hypothesis, using only the public API. Silent errors *may* be converted to noisy errors, but generally we prefer to issue a deprecation warning and use the new behaviour if possible. This stability policy only applies to use of Hypothesis itself, not the results of user-written tests that use Hypothesis. - ``RELEASE_TYPE: patch`` is for changes that are not visible in the public interface, from improving a docstring to backwards-compatible improvements in shrinking behaviour. This first line will be removed from the final change log entry. The remaining lines are the actual changelog text for this release, which should: - concisely describe what changed and why - use Sphinx cross-references to any functions or classes mentioned - if closing an issue, mention it with the ``:issue:`` role to generate a link - finish with a note of thanks from the maintainers: "Thanks to for this bug fix / feature / contribution" (depending on which it is). If this is your first contribution, don't forget to add yourself to contributors.rst! hypothesis-hypothesis-python-4.36.2/guides/internals.rst000066400000000000000000000376601354103617500235530ustar00rootroot00000000000000=================================== How to Work on Hypothesis Internals =================================== Note: Currently this guide is very specific to the *Python* version of Hypothesis. Over time the core will be factored out into a small separate set of libraries - the current migration plan is to move all of the Python code into Rust and have the Python and Ruby versions both depend on this. 
Eventually we will likely need to have more than one core library - e.g. a Java one as well. This is a guide to how to work on Hypothesis internals, with a particular focus on helping people who are new to it. Right now it is very rudimentary and is intended primarily for people who are looking to get started writing shrink passes as part of our `current outreach program to get more people doing that `_, but it will expand over time. ------------------------ Bird's Eye View Concepts ------------------------ The core engine of Hypothesis is called Conjecture. The "fundamental idea" of Conjecture is that you can represent an arbitrary randomized test case as the sequence of bytes read from the pseudo-random number generator (PRNG) that produced it. Whenever the test did something "random" it actually read the next bytes and did what they told it to do. But those bytes didn't *have* to come from a PRNG, and we can run the test given any byte sequence we like. By manipulating the choice of bytes, we can achieve more interesting effects than pure randomness would allow us to do, while retaining the power and ease of use of random testing. The greatest strength of this idea is that we have a single source of truth for what an example should look like: Every byte sequence is one that *could* have come from a PRNG, and thus is a valid thing to try for our test. The only ways it can fail to be a valid test input are for it to be too short or for it to not satisfy one of the test's preconditions, and both are easily detectable. The idea of shrinking in particular is that once we have this representation, we can shrink arbitrary test cases based on it. We try to produce a string that is *shortlex minimal*. What this means is that it has the shortest possible length and among those strings of minimal length is lexicographically (i.e. the normal order on strings - find the first byte at which they differ and use that to decide) smallest. 
Ideally we could think of the shrinker as a generic function that takes a string satisfying some predicate and returns the shortlex minimal string that also satisfies it. We depart from this ideal in two ways: * we can only *approximate* such a minimal string. Finding the actual minimum is intractable in general. * we are only interested in minimizing things where the predicate goes through the Hypothesis API, which lets us track how the data is used and use that to guide the process. We then use a number of different transformations of the string to try and reduce our input. These vary from principled general transformations to shameless hacks that special case something we need to work well. One such example of a hack is the handling of floating point numbers. There are a couple of lexicographic shrinks that are always valid but only really make sense for our particular encoding of floats. We check if we're working on something that is of the right size to be a float and apply those transformations regardless of whether it is actually meant to be a float. Worst case scenario it's not a float and they don't work, and we've run a few extra test cases. -------------------------- Useful Files to Know About -------------------------- The code associated with Conjecture lives in `src/hypothesis/internal/conjecture `_. There are a number of files in there, but the most important ones are ``engine.py`` and ``data.py``. ``data.py`` defines the core type that is used to represent test cases, and ``engine.py`` contains the main driver for deciding what test cases to run. There is also ``minimizer.py``, which contains a general purpose lexicographic minimizer. This is responsible for taking some byte string and a predicate over byte strings and producing a string of the same length which is lexicographically smaller. Unlike the shrinker in general, this *is* supposed to work on arbitrary predicates and doesn't know anything about the testing API. 
There are a number of cases where we find ourselves with a user-provided function (where the "user" might still be something that is entirely our code) and we want to pass a whole bunch of different examples to it in order to achieve some result.
They then run for a while, repeatedly calling the function they were given. Generally speaking they do not call the function directly, but instead wrap calls to it. This allows them to implement a certain amount of decision caching, e.g. avoiding trying the same shrink twice, but also gives us a place where we can update metadata about the search process. For objects whose goal is some form of optimisation (Shrinker, Minimizer) one of the pieces of metadata they will typically track is a "current target". This is typically the best example they have seen so far. By wrapping every call to the predicate, we ensure that we never miss an example even when we're passing through other things. For objects whose goal is some broader form of search (currently only ``ConjectureRunner``) this also allows them to keep track of *other* examples of interest. For example, as part of our multiple bug discovery, ``ConjectureRunner`` keeps track of the smallest example of each distinct failure that it has seen, and updates this automatically each time the test function is called. This means that if during shrinking we "slip" and find a different bug than the one we started with, we will *not* shrink to that, but it will get remembered by the runner if it was either novel or better than our current example. ~~~~~~~~~~~ Weird Loops ~~~~~~~~~~~ The loops inside a lot of the engine look very strange and unidiomatic. For example: .. code-block:: python i = 0 while i < len(self.intervals): u, v = self.intervals[i] if not self.incorporate_new_buffer( self.shrink_target.buffer[:u] + self.shrink_target.buffer[v:] ): i += 1 The more natural way to write this in Python would be: .. code-block:: python for u, v in self.intervals: self.incorporate_new_buffer( self.shrink_target.buffer[:u] + self.shrink_target.buffer[v:] ) This is not equivalent in this case, and would exhibit the wrong behaviour. Every time ``incorporate_new_buffer`` succeeds, it changes the shape of the current shrink target. 
In the event that none of the earlier deletions succeed, this causes us to retry the entire prefix uselessly, which can result in a pass taking O(n^2) time to do O(n) deletions.
They are designed to take the current shrink target and try a number of things that might be sensible shrinks of it. Typically the design of a search pass is that it should always try to run to completion rather than exiting as soon as it's found something good, but that it shouldn't retry things that are too like stuff it has already tried just because something worked. So for example in the above loop, we try deleting each interval (these roughly correspond to regions of the input that are responsible for some particular value or small number of adjacent values). When we succeed, we keep going and try deleting more intervals, but we don't try to delete any intervals before the current index. The reason for this is that retrying things from the beginning might work but probably won't. Thus if we restarted every time we made a change we would end up doing a lot of useless work. Additionally, they are *more* likely to work after other shrink passes have run because frequently other changes are likely to unlock changes in the current pass that were previously impossible. e.g. when we reorder some examples we might make a big region deletable that previously contained something critical to the relevant behaviour of the test but is now just noise. Because the shrinker runs in a big loop, if we've made progress the shrink pass will always be run again (assuming we don't hit some limit that terminates the shrink early, but by making the shrinker better we try to ensure that that never happens). This means that we will always get an opportunity to start again later if we made progress, and if we didn't make progress we've tried everything anyway. ~~~~~~~~~~~~~~~~~~~~~~~ Expensive Shrink Passes ~~~~~~~~~~~~~~~~~~~~~~~ We have a bunch of search passes that are considered "expensive". Typically this means "quadratic or worse complexity". 
When shrinking we initially don't run these, and the first time that we get to the end of our main passes and have failed to make the input any smaller, we then turn them on. This allows the shrinker to switch from a good but slightly timid mode while its input is large into a more aggressive DELETE ALL THE THINGS mode once that stops working. By that point we've usually made our input small enough that quadratic complexity is acceptable. We turn these on once and then they stay on. The reason for this is to avoid a "flip-flopping" scenario where an expensive pass unlocks one trivial change that the cheap passes can find and then they get stuck again and have to do an extra useless run through the passes to prove that. ~~~~~~~~~~~~~~~~~~~~~~ Adaptive Shrink Passes ~~~~~~~~~~~~~~~~~~~~~~ A useful trick that some of the shrink passes use is to try a thing and if it doesn't work take a look at what the test function did to guess *why* it didn't work and try to repair that. Two example such passes are ``zero_examples`` and the various passes that try to minimize individual blocks lexicographically. What happens in ``zero_examples`` is that we try replacing the region corresponding to a draw with all zero bytes. If that doesn't work, we check if that was because of changing the size of the example (e.g. doing that with a list will make the list much shorter) and messing up the byte stream after that point. If this was what happened then we try again with a sequence of zeroes that corresponds to the size of the draw call in the version we tried that didn't work. The logic for what we do with block minimization is in ``try_shrinking_blocks``. When it tries shrinking a block and it doesn't work, it checks if the sized changed. If it does then it tries deleting the number of bytes that were lost immediately after the shrunk block to see if it helps. 
This will give you a very detailed log of what the testing process is running, along with information about what passes in the shrinker are running and how they transform it.
hypothesis-hypothesis-python-4.36.2/guides/review.rst000066400000000000000000000333331354103617500230460ustar00rootroot00000000000000=================================== The Hypothesis Code Review Handbook =================================== Note: This review guide was written with the Python version in mind, but should apply to *all* versions. If you find a place where it's a bit too Python specific, please fix it or file an issue. This document outlines the process for reviewing changes to Hypothesis. It's partly descriptive, partly prescriptive, and entirely prone to change in response to circumstance and need. We're still figuring this thing out! ----------------- What Needs Review ----------------- The repository includes Hypothesis implementations for multiple languages, which have different review requirements due to different levels of project maturity: - all changes to hypothesis-python and the language-independent build infrastructure must be signed off by at least one person with write access to the repo other than the author of the change. (These requirements will apply to any Hypothesis implementations with a 1.0 release.) - changes by `DRMacIver `_ to hypothesis-ruby do not require review, but will be posted as pull requests, often for long enough that if someone wants to review and ask questions, they can. ---------------- How Review Works ---------------- Once the build is green and a reviewer has approved the change, anyone on the maintainer team may merge the request. More than one maintainer *may* review a change if they wish to, but it's not required. Any maintainer may block a pull request by requesting changes. Consensus on a review is best but not required. If some reviewers have approved a pull request and some have requested changes, ideally you would try to address all of the changes, but it is OK to dismiss dissenting reviews if you feel it appropriate. 
We've not tested the case of differing opinions much in practice yet, so we may grow firmer guidelines on what to do there over time. ------------ Review Goals ------------ At a high level, the two things we're looking for in review are answers to the following questions: 1. Is this change going to make users' lives worse? 2. Is this change going to make the maintainers' lives worse? Code review is a collaborative process between the author and the reviewer to try to ensure that the answer to both of those questions is no. Ideally of course the change should also make one or both of the users' and our lives *better*, but it's OK for changes to be mostly neutral. The author should be presumed to have a good reason for submitting the change in the first place, so neutral is good enough! -------------- Social Factors -------------- * Always thank external contributors. Thank maintainers too, ideally! * Remember that the `Code of Conduct `_ applies to pull requests and issues too. Feel free to throw your weight around to enforce this if necessary. * Anyone, maintainer or not, is welcome to do a code review. Only official maintainers have the ability to actually approve and merge a pull request, but outside review is also welcome. ------------ Requirements ------------ The rest of this document outlines specific things reviewers should focus on in aid of this, broken up by sections according to their area of applicability. All of these conditions must be satisfied for merge. Where the reviewer thinks this conflicts with the above higher level goals, they may make an exception if both the author and another maintainer agree. ~~~~~~~~~~~~~ Orthogonality ~~~~~~~~~~~~~ For all minor or patch releases, we enforce a hard and fast rule that they contain no more than one user-visible change. Major releases are allowed to bundle multiple changes together, but these should be structured as smaller pull requests into some tracking branch. 
A good rule of thumb is that if the ``RELEASE.rst`` uses the words "additionally" or needs bullet points to be clear, it is likely too large.
The changelog (in ``RELEASE.rst``) should bump the minor or patch version (see guides/documentation.rst for details), accurately describe the changes, and shouldn't refer to internal-only APIs. For complicated markup, consider building the docs and manually checking the changelog for formatting errors that didn't result in a compilation error. ~~~~~~~~~~~ API Changes ~~~~~~~~~~~ Public API changes require the most careful scrutiny of all reviews, because they are the ones we are stuck with for the longest: Hypothesis follows semantic versioning, and we don't release new major versions very often. Public API changes must satisfy the following: 1. All public API changes must be well documented. If it's not documented, it doesn't count as public API! 2. Changes must be backwards compatible. Where this is not possible, they must first introduce a deprecation warning, then once the major version is bumped the deprecation warning and the functionality may be removed. 3. If an API is deprecated, the deprecation warning must make it clear how the user should modify their code to adapt to this change ( possibly by referring to documentation). 4. If it is likely that we will want to make backwards incompatible changes to an API later, to whatever extent possible these should be made immediately when it is introduced instead. 5. APIs should give clear and helpful error messages in response to invalid inputs. In particular error messages should always display the value that triggered the error, and ideally be specific about the relevant feature of it that caused this failure (e.g. the type). 6. Incorrect usage should never "fail silently" - when a user accidentally misuses an API this should result in an explicit error. 7. Functionality should be limited to that which is easy to support in the long-term. In particular functionality which is very tied to the current Hypothesis internals should be avoided. 8. 
`DRMacIver `_ must approve the changes though other maintainers are welcome and likely to chip in to review as well. 9. We have a separate guide for `house API style `_ which should be followed. Note that currently this only covers the API style for the Python version. We are still figuring out the API style for the Ruby version. ~~~~~~~~~ Bug Fixes ~~~~~~~~~ 1. All bug fixes must come with a test that demonstrates the bug on master and which is fixed in this branch. An exception *may* be made here if the submitter can convincingly argue that testing this would be prohibitively difficult. 2. Where possible, a fix that makes it impossible for similar bugs to occur is better. 3. Where possible, a test that will catch both this bug and a more general class of bug that contains it is better. ~~~~~~~~~~~~~~~~ Settings Changes ~~~~~~~~~~~~~~~~ Note: This section currently only applies to the Python version. It is tempting to use the Hypothesis settings object as a dumping ground for anything and everything that you can think of to control Hypothesis. This rapidly gets confusing for users and should be carefully avoided. New settings should: 1. Be something that the user can meaningfully have an opinion on. Many of the settings that have been added to Hypothesis are just cases where Hypothesis is abdicating responsibility to do the right thing to the user. 2. Make sense without reference to Hypothesis internals. 3. Correspond to behaviour which can meaningfully differ between tests - either between two different tests or between two different runs of the same test (e.g. one use case is the profile system, where you might want to run Hypothesis differently in CI and development). If you would never expect a test suite to have more than one value for a setting across any of its runs, it should be some sort of global configuration, not a setting. 
For settings where this would be especially disruptive, we have also prefixed that deprecation process with a process where we emit a warning, add a special value that can be passed to opt-in to the future behaviour, and then in the following major release we deprecate *that*, make it a no-op, and make it an error to pass any other value.
Are there any more general changes suggested by this, and do they have appropriate issues and/or pull requests associated with them? ~~~~~~~~~~~~~~~~~~~~ Asking for more work ~~~~~~~~~~~~~~~~~~~~ Reviewers should in general not request changes that expand the scope of a pull request beyond its original intended goal. The primary design philosophy of our work-flow is that making correct changes should be cheap, and scope creep on pull requests works against that - If you can't touch something without having to touch a number of related areas as well, changing things becomes expensive again. This of course doesn't cover things where additional work is required to ensure the change is actually correct - for example, if you change public functionality you certainly need to update its documentation. That isn't scope creep, that's just the normal scope. If a pull request suggests additional work then between the reviewer and the author people should ensure that there are relevant tracking issues for that work (as per question 3 in "Non-Blocking Questions" above), but there is no obligation for either of them to actually do any of the work on those issues. By default it is the reviewer who should open these issues, but the author is welcome to as well. That being said, it's legitimate to expand the scope of a pull request in some cases. For example: * If not doing so is likely to cause problems later. For example, because of backwards compatibility requirements it might make sense to ask for some additional functionality that is likely to be added later so that the arguments to a function are in a more sensible order. * Cases where the added functionality feels extremely incomplete in some way without an additional change. The litmus test here should be "this will almost never be useful because...". This is still fairly subjective, but at least one good use case where the change is a clear improvement over the status quo is enough to indicate that this doesn't apply. 
If it's unclear, the reviewer should feel free to suggest additional work (but if the author is someone new, please make sure that it's clear that this is a suggestion and not a requirement!), but the author of the pull request should feel equally free to decline the suggestion. hypothesis-hypothesis-python-4.36.2/guides/strategies-that-shrink.rst000066400000000000000000000310541354103617500261470ustar00rootroot00000000000000=================================== Designing strategies to shrink well =================================== Reducing test cases to a minimal example is a great feature of Hypothesis, the implementation of which depends on both the shrinking engine and the structure of the strategy (or combination of strategies) which created the example to reduce. This document is organised into three parts: 1. How to tell if you need to think about shrinking (you probably don't!) 2. Designing for shrinking 'above' the Hypothesis public API 3. Implementation tricks used in our internals, for interested contributors It is written for people implementing complex third-party strategies (such as `hypothesis-networkx `__), current or potential contributors to Hypothesis itself, and anyone interested in how this works under the hood. ------------------------------------ Do you need to design for shrinking? ------------------------------------ You should only attempt to tune custom strategies for better shrinking behaviour if more time would otherwise be spent reducing examples by hand or debugging more complex examples. It *may* be worthwhile if: - Your custom strategy will be used by many people, so that spending the same effort tuning the strategy has much larger benefits, or - You have personally spent time debugging failures which better example shrinking could have avoided and think this might happen again. If neither of these apply to you, relax! 
This trick should be used sparingly, and always behind a branch that the shrinker can decide not to take, such as ``if draw(booleans()):``, but might occasionally be worth trying.
This is tempting because it's simple and it *works*, but it's often much slower than the alternatives.
For efficient shrinking, local operations on the tree should correspond with valid (and preferably local) shrinks to the final example. For example: .. code:: python # This form of loop is hard to shrink, because we'd have to reduce `n` and # delete something in the loop simultaneously. It's equivalent to the # `.flatmap` example above. We _do_ shrink this, but much more slowly. n = draw(integers(0, 10)) for _ in range(n): ... draw(...) ... # In this form, the shrinker can see a repeated structure of labels # and delete one loop iteration without touching anything else. # We use a variant of this trick to generate collections internally! while draw(integers(0, x)) > threshold: ... draw(...) ... Similarly, it's better to draw all the attributes or inputs you need for an object at the same time, again so they can be modified or deleted together. The exact behaviour of the shrinking is a topic of active research and development, so if you are interested in the details we recommend reading the `internals guide `_ and the well-commented source code in ``hypothesis.internal.conjecture``. An earlier (mid-2018) version is illustrated in David's draft paper *Test-Case Reduction for Free*, along with an extensive evaluation. Contact him if you would like a copy. ------------------------------------- Shrinking in the Hypothesis internals ------------------------------------- The last section is for current or prospective Hypothesis contributors only. These tricks rely on implementation details that are not available to third-party libraries or users, **and can change in any patch release**. Occasionally they are also indispensable to get good performance in underlying primitives, so please contact us if the public API is not enough and we may be able to work something out. What do internals get you? ~~~~~~~~~~~~~~~~~~~~~~~~~~ Using the low-level, internal APIs complements, rather than changing, the principles above. 
The bytestream-level view has some important advantages: Because we operate at the level of bits, the relationship between a value and the corresponding buffer is much more obvious. If we're careful, that means we can calculate the value we want and then write the corresponding buffer to recreate it when the test case is shrunk or replayed. A small step up from bits, we can also see the spans that indicate a subset of the buffer to consider for various transformations such as transposition or deletion. Sometimes these features are the only way to maintain acceptable performance in very rare or even pathological cases - consider shrinking a complex number with a single allowed magnitude - but it's almost certain that someone will need the core strategies to do just that. However, using low-level APIs also comes at a cost - they are verbose and generally more difficult to use, and can violate key invariants of the engine if misused. Internally, our strategies mostly use the public API or something that looks a lot like ``@composite``, so it's fairly easy to follow along. There are just a few tricks enabled by those low-level advantages that we wanted to name and document, so we can recognise them discuss them and invent more... Make your own luck ~~~~~~~~~~~~~~~~~~ This is the simplest trick that uses our ability to write choices to the buffer. We use it for ``sampled_from(...).filter(...)``, after trying an initial draw with the usual rejection sampling technique, and added the ``SearchStrategy.do_filtered_draw`` method so other strategies can opt-in as we design similar tricks for their structure. It was originally designed for stateful testing, where "lucky generation" might be inefficient if there are many rules but only a few allowed by their preconditions. Here's how it works for stateful testing: 1. Draw an index into the unfiltered list of rules. Return the corresponding rule if it's allowed - we got lucky! (or someone set us up...) 2. 
Create a list of allowed rules, and choose one from that shortlist instead. 3. Find the index of the chosen rule *in the unfiltered list*, and write that index to the buffer. Finally, return the chosen rule. When the shrinker tries to delete the first two draws, the resulting buffer will lead to the same rule being chosen at step *one* instead. We've made our own luck! This trick is especially useful when we want to avoid rejection sampling (the ``.filter`` method, ``assume``) for performance reasons, but also need to give the shrinker the same low-level representation for each instance of a repeated choice. Flags "shrink open" ~~~~~~~~~~~~~~~~~~~ An important insight from `Swarm Testing (PDF) `__ is that randomly disabling some features can actually reduce the expected time before finding a bug, because some bugs may be suppressed by otherwise common features or attributes of the data. As discussed on `issue #1401 `__, there are a few points to keep in mind when implementing shrinkable swarm testing: - You need swarm flags to "shrink open" so that once the shrinker has run to completion, all flags are enabled. e.g. you could do this by generating a set of banned flags. - You need to use rejection sampling rather than anything more clever, or at least look like it to the shrinker. (see e.g. *Make your own luck*, above) Taking Unicode as an example, we'd like to use our knowledge of Unicode categories to generate more complex examples, but shrink the generated string without reference to categories. While we haven't actually implemented this yet - it's pretty hairy - the simple version of the idea goes like this: 1. Generate a set of banned categories. 2. Use ``characters().filter(category_is_not_banned)`` When shrinking, we start by removing categories from the banned set, after which characters in the string can be reduced as usual. 
In a serious version, the make-your-own-luck approach would be essential to make the filter reasonably efficient, but that's not a problem internally. In more complicated structures, it would be nice to generate the flags on first use rather than up front before we know if we need them. The trick there is to write each flag to the buffer every time we check it, in such a way that if we delete the first use the second turns into an initialisation. Explicit example boundaries ~~~~~~~~~~~~~~~~~~~~~~~~~~~ This is almost always handled implicitly, e.g. by ``cu.many``, but *sometimes* it can be useful to explicitly insert boundaries around draws that should be deleted simultaneously using ``data.start_example``. This is used to group the value and sign of floating-point numbers, for example, which we split up in order to provide a more natural shrinking order. Explicit example management can also be useful to delineate variably-sized draws, such as our internal helper ``cu.biased_coin``, which makes eliminating dead bytes much cheaper. Finally, labelling otherwise indistinguishable draws means the shrinker can attempt to swap only the like values. hypothesis-hypothesis-python-4.36.2/guides/testing-hypothesis.rst000066400000000000000000000156731354103617500254260ustar00rootroot00000000000000================== Testing Hypothesis ================== Note: This guide is currently entirely specific to the Python version of Hypothesis. This is a guide to the process of testing Hypothesis itself, both how to run its tests and how to to write new ones. -------------------------- General Testing Philosophy -------------------------- The test suite for Hypothesis is unusually powerful - as you might hope! - but the secret is actually more about attitude than technology. The key is that we treat any bug in Hypothesis as a bug in our test suite too - and think about the kinds of bugs that might not be caught, then write tests that would catch them. 
We also use a variety of tools to check our code automatically, including formatting, import order, linting, and typing our API with Mypy. All of this is checked in CI - which means that once the build is green, humans can all focus on meaningful review rather than nitpicking operator spacing. Similarly, we require all code to have tests with 100% branch coverage - as a starting point, not the final goal. - Requiring full coverage can't guarantee that we've written all the tests worth writing (for example, maybe we left off a useful assertion about the result), but less than full coverage guarantees that there's some code we're not testing at all. - Tests beyond full coverage generally aim to demonstrate that a particular feature works, or that some subtle failure case is not present - often because when it was found and fixed, someone wrote a test to make sure it couldn't come back! The ``tests/`` directory has some notes in the README file on where various kinds of tests can be found or added. Go there for the practical stuff, or just ask one of the maintainers for help on a pull request! Further reading: How `SQLite is tested `_, `how the Space Shuttle was tested `_, `how to misuse code coverage `_ (for inspiration, *not* implementation). Dan Luu writes about `fuzz testing `_ and `broken processes `_, among other things. --------------------------------------- Setting up a virtualenv to run tests in --------------------------------------- If you want to run individual tests rather than relying on the make tasks (which you probably will), it's easiest to do this in a virtualenv. The following will give you a working virtualenv for running tests in: .. code-block:: bash pip install virtualenv python -m virtualenv testing-venv # On Windows: testing-venv\Scripts\activate source testing-venv/bin/activate # Can also use pip install -e .[all] to get # all optional dependencies pip install -e . # Test specific dependencies. 
pip install pytest-xdist flaky mock Now whenever you want to run tests you can just activate the virtualenv using ``source testing-venv/bin/activate`` or ``testing-venv\Scripts\activate`` and all of the dependencies will be available to you and your local copy of Hypothesis will be on the path (so any edits will be picked up automatically and you don't need to reinstall it in the local virtualenv). ------------- Running Tests ------------- In order to run tests outside of the make/tox/etc set up, you'll need an environment where Hypothesis is on the path and all of the testing dependencies are installed. We recommend doing this inside a virtualenv as described in the previous section. All testing is done using `pytest `_, with a couple of plugins installed. For advanced usage we recommend reading the pytest documentation, but this section will give you a primer in enough of the common commands and arguments to get started. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Selecting Which Files to Run ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following invocation runs all of the tests in the file `tests/cover/test_conjecture_engine.py`: .. code-block:: python -m pytest tests/cover/test_conjecture_engine.py If you want to run multiple files you can pass them all as arguments, and if you pass a directory then it will run all files in that directory. For example the following runs all the files in `test_conjecture_engine.py` and `test_slippage.py` .. code-block:: python -m pytest tests/cover/test_conjecture_engine.py tests/cover/test_slippage.py If you were running this in bash (if you're not sure: if you're not on Windows you probably are) you could also use the syntax: .. code-block:: python -m pytest tests/cover/test_{conjecture_engine,slippage}.py And the following would run all tests under `tests/cover`: .. 
code-block:: python -m pytest tests/cover ~~~~~~~~~~~ Test Layout ~~~~~~~~~~~ The top level structure of the tests in Hypothesis looks as follows: * ``cover`` contains tests that we measure coverage for. This is intended to be a fairly minimal and fast set of tests that still gives pretty good confidence in the behaviour of the test suite. It is currently failing at both "minimal" and "fast", but we're trying to move it back in that direction. Try not to add tests to this unless they're actually to cover some specific target. * ``nocover`` is a general dumping ground for slower tests that aren't needed to achieve coverage. * ``quality`` is for expensive tests about the distribution or shrinking of examples. These will only be run on one Python version. * ``py2`` and ``py3`` are for tests which only run on one major version of Python. You can also write these in other directories using ``pytest.mark.skipif``, but these directories are useful for things that require a version-specific syntax. * The remaining test directories are for testing specific extras modules and should have the same name. As a rule of thumb when writing new tests, they should go in nocover unless they are for a specific extras module or to deliberately target a particular line for coverage. In the latter case, prefer fast unit tests over larger and slower integration tests (we are not currently very good at this). ~~~~~~~~~~~~~~~~ Useful Arguments ~~~~~~~~~~~~~~~~ Some useful arguments to pytest include: * You can pass ``-n 0`` to turn off ``pytest-xdist``'s parallel test execution. Sometimes for running just a small number of tests its startup time is longer than the time it saves (this will vary from system to system), so this can be helpful if you find yourself waiting on test runners to start a lot. * You can use ``-k`` to select a subset of tests to run. This matches on substrings of the test names. For example ``-kfoo`` will only run tests that have "foo" as a substring of their name. 
You can also use composite expressions here. e.g. ``-k'foo and not bar'`` will run anything containing foo that doesn't also contain bar. `More information on how to select tests to run can be found in the pytest documentation `__. hypothesis-hypothesis-python-4.36.2/hypothesis-python/000077500000000000000000000000001354103617500232445ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/.coveragerc000066400000000000000000000010201354103617500253560ustar00rootroot00000000000000[run] branch = True include = **/.tox/*/lib/*/site-packages/hypothesis/*.py **/.tox/*/lib/*/site-packages/hypothesis/**/*.py omit = **/pytestplugin.py **/strategytests.py **/compat*.py **/extra/__init__.py **/.tox/*/lib/*/site-packages/hypothesis/internal/coverage.py [report] exclude_lines = @abc.abstractmethod @abc.abstractproperty NotImplementedError pragma: no cover __repr__ __ne__ __copy__ __deepcopy__ except ImportError: if PY2: assert all\(.+\) hypothesis-hypothesis-python-4.36.2/hypothesis-python/LICENSE.txt000066400000000000000000000413651354103617500251000ustar00rootroot00000000000000Copyright (c) 2013, David R. MacIver All code in this repository except where explicitly noted otherwise is released under the Mozilla Public License v 2.0. You can obtain a copy at https://mozilla.org/MPL/2.0/. Some code in this repository comes from other projects. Where applicable, the original copyright and license are noted and any modifications made are released dual licensed with the original license. Mozilla Public License Version 2.0 ================================== 1. Definitions -------------- 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. 1.3. "Contribution" means Covered Software of a particular Contributor. 1.4. 
"Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. "Incompatible With Secondary Licenses" means (a) that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or (b) that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. "Executable Form" means any form of the work other than Source Code Form. 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" means this document. 1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. "Modifications" means any of the following: (a) any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or (b) any new file in Source Code Form that contains any Covered Software. 1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. "Source Code Form" means the form of the work preferred for making modifications. 1.14. 
"You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions -------------------------------- 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: (a) under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and (b) under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. 
Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: (a) for any code that a Contributor has removed from Covered Software; or (b) for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or (c) under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities ------------------- 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. 3.2. 
Distribution of Executable Form If You distribute Covered Software in Executable Form then: (a) such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and (b) You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. 
You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation --------------------------------------------------- If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination -------------- 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. 
Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. ************************************************************************ * * * 6. Disclaimer of Warranty * * ------------------------- * * * * Covered Software is provided under this License on an "as is" * * basis, without warranty of any kind, either expressed, implied, or * * statutory, including, without limitation, warranties that the * * Covered Software is free of defects, merchantable, fit for a * * particular purpose or non-infringing. The entire risk as to the * * quality and performance of the Covered Software is with You. * * Should any Covered Software prove defective in any respect, You * * (not any Contributor) assume the cost of any necessary servicing, * * repair, or correction. This disclaimer of warranty constitutes an * * essential part of this License. No use of any Covered Software is * * authorized under this License except under this disclaimer. 
* * * ************************************************************************ ************************************************************************ * * * 7. Limitation of Liability * * -------------------------- * * * * Under no circumstances and under no legal theory, whether tort * * (including negligence), contract, or otherwise, shall any * * Contributor, or anyone who distributes Covered Software as * * permitted above, be liable to You for any direct, indirect, * * special, incidental, or consequential damages of any character * * including, without limitation, damages for lost profits, loss of * * goodwill, work stoppage, computer failure or malfunction, or any * * and all other commercial damages or losses, even if such party * * shall have been informed of the possibility of such damages. This * * limitation of liability shall not apply to liability for death or * * personal injury resulting from such party's negligence to the * * extent applicable law prohibits such limitation. Some * * jurisdictions do not allow the exclusion or limitation of * * incidental or consequential damages, so this exclusion and * * limitation may not apply to You. * * * ************************************************************************ 8. Litigation ------------- Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. 9. Miscellaneous ---------------- This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. 
Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License --------------------------- 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice ------------------------------------------- This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/. If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. 
Exhibit B - "Incompatible With Secondary Licenses" Notice --------------------------------------------------------- This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. hypothesis-hypothesis-python-4.36.2/hypothesis-python/MANIFEST.in000066400000000000000000000000571354103617500250040ustar00rootroot00000000000000# Include the license file include LICENSE.txt hypothesis-hypothesis-python-4.36.2/hypothesis-python/README.rst000066400000000000000000000041271354103617500247370ustar00rootroot00000000000000========== Hypothesis ========== Hypothesis is an advanced testing library for Python. It lets you write tests which are parametrized by a source of examples, and then generates simple and comprehensible examples that make your tests fail. This lets you find more bugs in your code with less work. e.g. .. code-block:: python @given(st.lists( st.floats(allow_nan=False, allow_infinity=False), min_size=1)) def test_mean(xs): assert min(xs) <= mean(xs) <= max(xs) .. code-block:: Falsifying example: test_mean( xs=[1.7976321109618856e+308, 6.102390043022755e+303] ) Hypothesis is extremely practical and advances the state of the art of unit testing by some way. It's easy to use, stable, and powerful. If you're not using Hypothesis to test your project then you're missing out. ------------------------ Quick Start/Installation ------------------------ If you just want to get started: .. code-block:: pip install hypothesis ----------------- Links of interest ----------------- The main Hypothesis site is at `hypothesis.works `_, and contains a lot of good introductory and explanatory material. Extensive documentation and examples of usage are `available at readthedocs `_. If you want to talk to people about using Hypothesis, `we have both an IRC channel and a mailing list `_. 
If you want to receive occasional updates about Hypothesis, including useful tips and tricks, there's a `TinyLetter mailing list to sign up for them `_. If you want to contribute to Hypothesis, `instructions are here `_. If you want to hear from people who are already using Hypothesis, some of them `have written about it `_. If you want to create a downstream package of Hypothesis, please read `these guidelines for packagers `_. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/000077500000000000000000000000001354103617500241745ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/_static/000077500000000000000000000000001354103617500256225ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/_static/.empty000066400000000000000000000000001354103617500267470ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/changes.rst000066400000000000000000007541561354103617500263600ustar00rootroot00000000000000========= Changelog ========= This is a record of all past Hypothesis releases and what went into them, in reverse chronological order. All previous releases should still be available on pip. Hypothesis APIs come in three flavours: * Public: Hypothesis releases since 1.0 are `semantically versioned `_ with respect to these parts of the API. These will not break except between major version bumps. All APIs mentioned in this documentation are public unless explicitly noted otherwise. * Semi-public: These are APIs that are considered ready to use but are not wholly nailed down yet. They will not break in patch releases and will *usually* not break in minor releases, but when necessary minor releases may break semi-public APIs. * Internal: These may break at any time and you really should not use them at all. You should generally assume that an API is internal unless you have specific information to the contrary. .. 
_v4.36.2: ------------------- 4.36.2 - 2019-09-20 ------------------- This patch disables part of the :mod:`typing`-based inference for the :pypi:`attrs` package under Python 3.5.0, which has some incompatible internal details (:issue:`2095`). .. _v4.36.1: ------------------- 4.36.1 - 2019-09-17 ------------------- This patch fixes a bug in strategy inference for :pypi:`attrs` classes where Hypothesis would fail to infer a strategy for attributes of a generic type such as ``Union[int, str]`` or ``List[bool]`` (:issue:`2091`). Thanks to Jonathan Gayvallet for the bug report and this patch! .. _v4.36.0: ------------------- 4.36.0 - 2019-09-09 ------------------- This patch deprecates ``min_len`` or ``max_len`` of 0 in :func:`~hypothesis.extra.numpy.byte_string_dtypes` and :func:`~hypothesis.extra.numpy.unicode_string_dtypes`. The lower limit is now 1. Numpy uses a length of 0 in these dtypes to indicate an undetermined size, chosen from the data at array creation. However, as the :func:`~hypothesis.extra.numpy.arrays` strategy creates arrays before filling them, strings were truncated to 1 byte. .. _v4.35.1: ------------------- 4.35.1 - 2019-09-09 ------------------- This patch improves the messaging that comes from invalid size arguments to collection strategies such as :func:`~hypothesis.strategies.lists`. .. _v4.35.0: ------------------- 4.35.0 - 2019-09-04 ------------------- This release improves the :func:`~hypothesis.extra.lark.from_lark` strategy, tightening argument validation and adding the ``explicit`` argument to allow use with terminals that use ``@declare`` instead of a string or regular expression. This feature is required to handle features such as indent and dedent tokens in Python code, which can be generated with the :pypi:`hypothesmith` package. .. 
_v4.34.0: ------------------- 4.34.0 - 2019-08-23 ------------------- The :func:`~hypothesis.strategies.from_type` strategy now knows to look up the subclasses of abstract types, which cannot be instantiated directly. This is very useful for :pypi:`hypothesmith` to support :pypi:`libCST`. .. _v4.33.1: ------------------- 4.33.1 - 2019-08-21 ------------------- This patch works around a crash when an incompatible version of Numpy is installed under PyPy 5.10 (Python 2.7). If you are still using Python 2, please upgrade to Python 3 as soon as possible - it will be unsupported at the end of this year. .. _v4.33.0: ------------------- 4.33.0 - 2019-08-20 ------------------- This release improves the :func:`~hypothesis.provisional.domains` strategy, as well as the :func:`~hypothesis.provisional.urls` and the :func:`~hypothesis.strategies.emails` strategies which use it. These strategies now use the full IANA list of Top Level Domains and are correct as per :rfc:`1035`. Passing tests using these strategies may now fail. Thanks to `TechDragon `__ for this improvement. .. _v4.32.3: ------------------- 4.32.3 - 2019-08-05 ------------------- This patch tidies up the repr of several ``settings``-related objects, at runtime and in the documentation, and deprecates the undocumented edge case that ``phases=None`` was treated like ``phases=tuple(Phase)``. It *also* fixes :func:`~hypothesis.extra.lark.from_lark` with :pypi:`lark 0.7.2 ` and later. .. _v4.32.2: ------------------- 4.32.2 - 2019-07-30 ------------------- This patch updates some internal comments for :pypi:`mypy` 0.720. There is no user-visible impact. .. _v4.32.1: ------------------- 4.32.1 - 2019-07-29 ------------------- This release changes how the shrinker represents its progress internally. For large generated test cases this should result in significantly less memory usage and possibly faster shrinking. Small generated test cases may be slightly slower to shrink but this shouldn't be very noticeable. .. 
_v4.32.0: ------------------- 4.32.0 - 2019-07-28 ------------------- This release makes :func:`~hypothesis.extra.numpy.arrays` more pedantic about ``elements`` strategies that cannot be exactly represented as array elements. In practice, you will see new warnings if you were using a ``float16`` or ``float32`` dtype without passing :func:`~hypothesis.strategies.floats` the ``width=16`` or ``width=32`` arguments respectively. The previous behaviour could lead to silent truncation, and thus some elements being equal to an explicitly excluded bound (:issue:`1899`). .. _v4.31.1: ------------------- 4.31.1 - 2019-07-28 ------------------- This patch changes an internal use of MD5 to SHA hashes, to better support users subject to FIPS-140. There is no user-visible or API change. Thanks to Alex Gaynor for this patch. .. _v4.31.0: ------------------- 4.31.0 - 2019-07-24 ------------------- This release simplifies the logic of the :attr:`~hypothesis.settings.print_blob` setting by removing the option to set it to ``PrintSettings.INFER``. As a result the ``print_blob`` setting now takes a single boolean value, and the use of ``PrintSettings`` is deprecated. .. _v4.28.2: ------------------- 4.28.2 - 2019-07-14 ------------------- This patch improves the docstrings of several Hypothesis strategies, by clarifying markup and adding cross-references. There is no runtime change. Thanks to Elizabeth Williams and Serah Njambi Rono for their contributions at the SciPy 2019 sprints! .. _v4.28.1: ------------------- 4.28.1 - 2019-07-12 ------------------- This patch improves the behaviour of the :func:`~hypothesis.strategies.text` strategy when passed an ``alphabet`` which is not a strategy. The value is now interpreted as ``whitelist_characters`` to :func:`~hypothesis.strategies.characters` instead of a sequence for :func:`~hypothesis.strategies.sampled_from`, which standardises the distribution of examples and the shrinking behaviour. 
You can get the previous behaviour by using ``lists(sampled_from(alphabet)).map("".map)`` instead. .. _v4.28.0: ------------------- 4.28.0 - 2019-07-11 ------------------- This release deprecates ``find()``. The ``.example()`` method is a better replacement if you want *an* example, and for the rare occasions where you want the *minimal* example you can get it from :func:`@given `. :func:`@given ` has steadily outstripped ``find()`` in both features and performance over recent years, and as we do not have the resources to maintain and test both we think it is better to focus on just one. .. _v4.27.0: ------------------- 4.27.0 - 2019-07-08 ------------------- This release refactors the implementation of the ``.example()`` method, to more accurately represent the data which will be generated by :func:`@given `. As a result, calling ``s.example()`` on an empty strategy ``s`` (such as :func:`~hypothesis.strategies.nothing`) now raises ``Unsatisfiable`` instead of the old ``NoExamples`` exception. .. _v4.26.4: ------------------- 4.26.4 - 2019-07-07 ------------------- This patch ensures that the Pandas extra will keep working when Python 3.8 removes abstract base classes from the top-level :obj:`python:collections` namespace. This also fixes the relevant warning in Python 3.7, but there is no other difference in behaviour and you do not need to do anything. .. _v4.26.3: ------------------- 4.26.3 - 2019-07-05 ------------------- This release fixes :issue:`2027`, by changing the way Hypothesis tries to generate distinct examples to be more efficient. This may result in slightly different data distribution, and should improve generation performance in general, but should otherwise have minimal user impact. .. _v4.26.2: ------------------- 4.26.2 - 2019-07-04 ------------------- This release fixes :issue:`1864`, where some simple tests would perform very slowly, because they would run many times with each subsequent run being progressively slower. 
They will now stop after a more reasonable number of runs without hitting this problem. Unless you are hitting exactly this issue, it is unlikely that this release will have any effect, but certain classes of custom generators that are currently very slow may become a bit faster, or start to trigger health check failures. .. _v4.26.1: ------------------- 4.26.1 - 2019-07-04 ------------------- This release adds the strategy :func:`~hypothesis.extra.numpy.integer_array_indices`, which generates tuples of Numpy arrays that can be used for `advanced indexing `_ to select an array of a specified shape. .. _v4.26.0: ------------------- 4.26.0 - 2019-07-04 ------------------- This release significantly improves the performance of drawing unique collections whose elements are drawn from :func:`~hypothesis.strategies.sampled_from` strategies. As a side effect, this detects an error condition that would previously have passed silently: When the ``min_size`` argument on a collection with distinct elements is greater than the number of elements being sampled, this will now raise an error. .. _v4.25.1: ------------------- 4.25.1 - 2019-07-03 ------------------- This release removes some defunct internal functionality that was only being used for testing. It should have no user visible impact. .. _v4.25.0: ------------------- 4.25.0 - 2019-07-03 ------------------- This release deprecates and disables the ``buffer_size`` setting, which should have been treated as a private implementation detail all along. We recommend simply deleting this settings argument. .. _v4.24.6: ------------------- 4.24.6 - 2019-06-26 ------------------- This patch makes :func:`~hypothesis.strategies.datetimes` more efficient, as it now handles short months correctly by construction instead of filtering. .. _v4.24.5: ------------------- 4.24.5 - 2019-06-23 ------------------- This patch improves the development experience by simplifying the tracebacks you will see when e.g. 
you have used the ``.map(...)`` method of a strategy and the mapped function raises an exception. No new exceptions can be raised, nor existing exceptions change anything but their traceback. We're simply using if-statements rather than exceptions for control flow in a certain part of the internals! .. _v4.24.4: ------------------- 4.24.4 - 2019-06-21 ------------------- This patch fixes :issue:`2014`, where our compatibility layer broke with version 3.7.4 of the :pypi:`typing` module backport on PyPI. This issue only affects Python 2. We remind users that Hypothesis, like many other packages, `will drop Python 2 support on 2020-01-01 `__ and already has several features that are only available on Python 3. .. _v4.24.3: ------------------- 4.24.3 - 2019-06-07 ------------------- This patch improves the implementation of an internal wrapper on Python 3.8 beta1 (and will break on the alphas; but they're not meant to be stable). On other versions, there is no change at all. Thanks to Daniel Hahler for the patch, and Victor Stinner for his work on :bpo:`37032` that made it possible. .. _v4.24.2: ------------------- 4.24.2 - 2019-06-06 ------------------- Deprecation messages for functions in ``hypothesis.extra.django.models`` now explicitly name the deprecated function to make it easier to track down usages. Thanks to Kristian Glass for this contribution! .. _v4.24.1: ------------------- 4.24.1 - 2019-06-04 ------------------- This patch fixes :issue:`1999`, a spurious bug raised when a :func:`@st.composite ` function was passed a keyword-only argument. Thanks to Jim Nicholls for his fantastic bug report. .. _v4.24.0: ------------------- 4.24.0 - 2019-05-29 ------------------- This release deprecates ``GenericStateMachine``, in favor of :class:`~hypothesis.stateful.RuleBasedStateMachine`. Rule-based stateful testing is significantly faster, especially during shrinking. 
If your use-case truly does not fit rule-based stateful testing, we recommend writing a custom test function which drives your specific control-flow using :func:`~hypothesis.strategies.data`. .. _v4.23.9: ------------------- 4.23.9 - 2019-05-28 ------------------- This patch fixes a very rare example database issue with file permissions. When running a test that uses both :func:`@given ` and ``pytest.mark.parametrize``, using :pypi:`pytest-xdist` on Windows, with failing examples in the database, two attempts to read a file could overlap and we caught ``FileNotFound`` but not other ``OSError``\ s. .. _v4.23.8: ------------------- 4.23.8 - 2019-05-26 ------------------- This patch has a minor cleanup of the internal engine. There is no user-visible impact. .. _v4.23.7: ------------------- 4.23.7 - 2019-05-26 ------------------- This patch clarifies some error messages when the test function signature is incompatible with the arguments to :func:`@given `, especially when the :obj:`@settings() ` decorator is also used (:issue:`1978`). .. _v4.23.6: ------------------- 4.23.6 - 2019-05-19 ------------------- This release adds the :pypi:`pyupgrade` fixer to our code style, for consistent use of dict and set literals and comprehensions. .. _v4.23.5: ------------------- 4.23.5 - 2019-05-16 ------------------- This release slightly simplifies a small part of the core engine. There is no user-visible change. .. _v4.23.4: ------------------- 4.23.4 - 2019-05-09 ------------------- Fixes a minor formatting issue the docstring of :func:`~hypothesis.strategies.from_type` .. _v4.23.3: ------------------- 4.23.3 - 2019-05-09 ------------------- Adds a recipe to the docstring of :func:`~hypothesis.strategies.from_type` that describes a means for drawing values for "everything except" a specified type. This recipe is especially useful for writing tests that perform input-type validation. .. 
_v4.23.2: ------------------- 4.23.2 - 2019-05-08 ------------------- This patch uses :pypi:`autoflake` to remove some pointless ``pass`` statements, which improves our workflow but has no user-visible impact. .. _v4.23.1: ------------------- 4.23.1 - 2019-05-08 ------------------- This patch fixes an OverflowError in :func:`from_type(xrange) ` on Python 2. It turns out that not only do the ``start`` and ``stop`` values have to fit in a C long, but so does ``stop - start``. We now handle this even on 32bit platforms, but remind users that Python2 will not be supported after 2019 without specific funding. .. _v4.23.0: ------------------- 4.23.0 - 2019-05-08 ------------------- This release implements the :func:`~hypothesis.strategies.slices` strategy, to generate slices of a length-``size`` sequence. Thanks to Daniel J. West for writing this patch at the PyCon 2019 sprints! .. _v4.22.3: ------------------- 4.22.3 - 2019-05-07 ------------------- This patch exposes :class:`~hypothesis.strategies.DataObject`, *solely* to support more precise type hints. Objects of this type are provided by :func:`~hypothesis.strategies.data`, and can be used to draw examples from strategies intermixed with your test code. .. _v4.22.2: ------------------- 4.22.2 - 2019-05-07 ------------------- This patch fixes the very rare :issue:`1798` in :func:`~hypothesis.extra.numpy.array_dtypes`, which caused an internal error in our tests. .. _v4.22.1: ------------------- 4.22.1 - 2019-05-07 ------------------- This patch fixes a rare bug in :func:`from_type(range) `. Thanks to Zebulun Arendsee for fixing the bug at the PyCon 2019 Sprints. .. _v4.22.0: ------------------- 4.22.0 - 2019-05-07 ------------------- The ``unique_by`` argument to :obj:`~hypothesis.strategies.lists` now accepts a tuple of callables such that every element of the generated list will be unique with respect to each callable in the tuple (:issue:`1916`). 
Thanks to Marco Sirabella for this feature at the PyCon 2019 sprints! .. _v4.21.1: ------------------- 4.21.1 - 2019-05-06 ------------------- This patch cleans up the internals of :func:`~hypothesis.strategies.one_of`. You may see a slight change to the distribution of examples from this strategy but there is no change to the public API. Thanks to Marco Sirabella for writing this patch at the PyCon 2019 sprints! .. _v4.21.0: ------------------- 4.21.0 - 2019-05-05 ------------------- The :func:`~hypothesis.strategies.from_type` strategy now supports :class:`python:slice` objects. Thanks to Charlie El. Awbery for writing this feature at the `PyCon 2019 Mentored Sprints `__. .. _v4.20.0: ------------------- 4.20.0 - 2019-05-05 ------------------- This release improves the :func:`~hypothesis.extra.numpy.array_shapes` strategy, to choose an appropriate default for ``max_side`` based on the ``min_side``, and ``max_dims`` based on the ``min_dims``. An explicit error is raised for dimensions greater than 32, which are not supported by Numpy, as for other invalid combinations of arguments. Thanks to Jenny Rouleau for writing this feature at the `PyCon 2019 Mentored Sprints `__. .. _v4.19.0: ------------------- 4.19.0 - 2019-05-05 ------------------- The :func:`~hypothesis.strategies.from_type` strategy now supports :class:`python:range` objects (or ``xrange`` on Python 2). Thanks to Katrina Durance for writing this feature at the `PyCon 2019 Mentored Sprints `__. .. _v4.18.3: ------------------- 4.18.3 - 2019-04-30 ------------------- This release fixes a very rare edge case in the test-case mutator, which could cause an internal error with certain unusual tests. .. _v4.18.2: ------------------- 4.18.2 - 2019-04-30 ------------------- This patch makes Hypothesis compatible with the Python 3.8 alpha, which changed the representation of code objects to support positional-only arguments. Note however that Hypothesis does not (yet) support such functions as e.g. 
arguments to :func:`~hypothesis.strategies.builds` or inputs to :func:`@given `. Thanks to Paul Ganssle for identifying and fixing this bug. .. _v4.18.1: ------------------- 4.18.1 - 2019-04-29 ------------------- This patch improves the performance of unique collections such as :func:`~hypothesis.strategies.sets` when the elements are drawn from a :func:`~hypothesis.strategies.sampled_from` strategy (:issue:`1115`). .. _v4.18.0: ------------------- 4.18.0 - 2019-04-24 ------------------- This release adds the :func:`~hypothesis.strategies.functions` strategy, which can be used to imitate your 'real' function for callbacks. .. _v4.17.2: ------------------- 4.17.2 - 2019-04-19 ------------------- This release refactors stateful rule selection to share the new machinery with :func:`~hypothesis.strategies.sampled_from` instead of using the original independent implementation. .. _v4.17.1: ------------------- 4.17.1 - 2019-04-16 ------------------- This patch allows Hypothesis to try a few more examples after finding the first bug, in hopes of reporting multiple distinct bugs. The heuristics described in :issue:`847` ensure that we avoid wasting time on fruitless searches, while still surfacing each bug as soon as possible. .. _v4.17.0: ------------------- 4.17.0 - 2019-04-16 ------------------- This release adds the strategy :func:`~hypothesis.extra.numpy.broadcastable_shapes`, which generates array shapes that are `broadcast-compatible `_ with a provided shape. .. _v4.16.0: ------------------- 4.16.0 - 2019-04-12 ------------------- This release allows :func:`~hypothesis.strategies.register_type_strategy` to be used with :obj:`python:typing.NewType` instances. This may be useful to e.g. provide only positive integers for :func:`from_type(UserId) ` with a ``UserId = NewType('UserId', int)`` type. Thanks to PJCampi for suggesting and writing the patch! .. 
_v4.15.0: ------------------- 4.15.0 - 2019-04-09 ------------------- This release supports passing a :class:`~python:datetime.timedelta` as the :obj:`~hypothesis.settings.deadline` setting, so you no longer have to remember that the number is in milliseconds (:issue:`1900`). Thanks to Damon Francisco for this change! .. _v4.14.7: ------------------- 4.14.7 - 2019-04-09 ------------------- This patch makes the type annotations on ``hypothesis.extra.dateutil`` compatible with :pypi:`mypy` 0.700. .. _v4.14.6: ------------------- 4.14.6 - 2019-04-07 ------------------- This release fixes a bug introduced in :ref:`Hypothesis 4.14.3 ` that would sometimes cause :func:`sampled_from(...).filter(...) ` to encounter an internal assertion failure when there are three or fewer elements, and every element is rejected by the filter. .. _v4.14.5: ------------------- 4.14.5 - 2019-04-05 ------------------- This patch takes the previous efficiency improvements to :func:`sampled_from(...).filter(...) ` strategies that reject most elements, and generalises them to also apply to ``sampled_from(...).filter(...).filter(...)`` and longer chains of filters. .. _v4.14.4: ------------------- 4.14.4 - 2019-04-05 ------------------- This release fixes a bug that prevented :func:`~hypothesis.strategies.random_module` from correctly restoring the previous state of the ``random`` module. The random state was instead being restored to a temporary deterministic state, which accidentally caused subsequent tests to see the same random values across multiple test runs. .. _v4.14.3: ------------------- 4.14.3 - 2019-04-03 ------------------- This patch adds an internal special case to make :func:`sampled_from(...).filter(...) ` much more efficient when the filter rejects most elements (:issue:`1885`). .. _v4.14.2: ------------------- 4.14.2 - 2019-03-31 ------------------- This patch improves the error message if the function ``f`` in :ref:`s.flatmap(f) ` does not return a strategy. 
Thanks to Kai Chen for this change! .. _v4.14.1: ------------------- 4.14.1 - 2019-03-30 ------------------- This release modifies how Hypothesis selects operations to run during shrinking, by causing it to deprioritise previously useless classes of shrink until others have reached a fixed point. This avoids certain pathological cases where the shrinker gets very close to finishing and then takes a very long time to finish the last small changes because it tries many useless shrinks for each useful one towards the end. It also should cause a more modest improvement (probably no more than about 30%) in shrinking performance for most tests. .. _v4.14.0: ------------------- 4.14.0 - 2019-03-19 ------------------- This release blocks installation of Hypothesis on Python 3.4, which :PEP:`reached its end of life date on 2019-03-18 <429>`. This should not be of interest to anyone but downstream maintainers - if you are affected, migrate to a secure version of Python as soon as possible or at least seek commercial support. .. _v4.13.0: ------------------- 4.13.0 - 2019-03-19 ------------------- This release makes it an explicit error to call :func:`floats(min_value=inf, exclude_min=True) ` or :func:`floats(max_value=-inf, exclude_max=True) `, as there are no possible values that can be generated (:issue:`1859`). :func:`floats(min_value=0.0, max_value=-0.0) ` is now deprecated. While `0. == -0.` and we could thus generate either if comparing by value, violating the sequence ordering of floats is a special case we don't want or need. .. _v4.12.1: ------------------- 4.12.1 - 2019-03-18 ------------------- This release should significantly reduce the amount of memory that Hypothesis uses for representing large test cases, by storing information in a more compact representation and only unpacking it lazily when it is first needed. .. 
_v4.12.0: ------------------- 4.12.0 - 2019-03-18 ------------------- This update adds the :obj:`~hypothesis.settings.report_multiple_bugs` setting, which you can use to disable multi-bug reporting and only raise whichever bug had the smallest minimal example. This is occasionally useful when using a debugger or tools that annotate tracebacks via introspection. .. _v4.11.7: ------------------- 4.11.7 - 2019-03-18 ------------------- This change makes a tiny improvement to the core engine's bookkeeping. There is no user-visible change. .. _v4.11.6: ------------------- 4.11.6 - 2019-03-15 ------------------- This release changes some of Hypothesis's internal shrinking behaviour in order to reduce memory usage and hopefully improve performance. .. _v4.11.5: ------------------- 4.11.5 - 2019-03-13 ------------------- This release adds a micro-optimisation to how Hypothesis handles debug reporting internally. Hard to shrink test may see a slight performance improvement, but in most common scenarios it is unlikely to be noticeable. .. _v4.11.4: ------------------- 4.11.4 - 2019-03-13 ------------------- This release removes some redundant code that was no longer needed but was still running a significant amount of computation and allocation on the hot path. This should result in a modest speed improvement for most tests, especially those with large test cases. .. _v4.11.3: ------------------- 4.11.3 - 2019-03-13 ------------------- This release adds a micro-optimisation to how Hypothesis caches test cases. This will cause a small improvement in speed and memory usage for large test cases, but in most common scenarios it is unlikely to be noticeable. .. _v4.11.2: ------------------- 4.11.2 - 2019-03-13 ------------------- This release removes some internal code that populates a field that is no longer used anywhere. This should result in some modest performance and speed improvements and no other user visible effects. .. 
_v4.11.1: ------------------- 4.11.1 - 2019-03-13 ------------------- This is a formatting-only patch, enabled by a new version of :pypi:`isort`. .. _v4.11.0: ------------------- 4.11.0 - 2019-03-12 ------------------- This release deprecates :func:`~hypothesis.strategies.sampled_from` with empty sequences. This returns :func:`~hypothesis.strategies.nothing`, which gives a clear error if used directly... but simply vanishes if combined with another strategy. Tests that silently generate less than expected are a serious problem for anyone relying on them to find bugs, and we think reliability more important than convenience in this case. .. _v4.10.0: ------------------- 4.10.0 - 2019-03-11 ------------------- This release improves Hypothesis's to detect flaky tests, by noticing when the behaviour of the test changes between runs. In particular this will notice many new cases where data generation depends on external state (e.g. external sources of randomness) and flag those as flaky sooner and more reliably. The basis of this feature is a considerable reengineering of how Hypothesis stores its history of test cases, so on top of this its memory usage should be considerably reduced. .. _v4.9.0: ------------------ 4.9.0 - 2019-03-09 ------------------ This release adds the strategy :func:`~hypothesis.extra.numpy.valid_tuple_axes`, which generates tuples of axis-indices that can be passed to the ``axis`` argument in NumPy's sequential functions (e.g. :func:`numpy:numpy.sum`). Thanks to Ryan Soklaski for this strategy. .. _v4.8.0: ------------------ 4.8.0 - 2019-03-06 ------------------ This release significantly tightens validation in :class:`hypothesis.settings`. :obj:`~hypothesis.settings.max_examples`, ``buffer_size``, and :obj:`~hypothesis.settings.stateful_step_count` must be positive integers; :obj:`~hypothesis.settings.deadline` must be a positive number or ``None``; and :obj:`~hypothesis.settings.derandomize` must be either ``True`` or ``False``. 
As usual, this replaces existing errors with a more helpful error and starts new validation checks as deprecation warnings. .. _v4.7.19: ------------------- 4.7.19 - 2019-03-04 ------------------- This release makes some micro-optimisations to certain calculations performed in the shrinker. These should particularly speed up large test cases where the shrinker makes many small changes. It will also reduce the amount allocated, but most of this is garbage that would have been immediately thrown away, so you probably won't see much effect specifically from that. .. _v4.7.18: ------------------- 4.7.18 - 2019-03-03 ------------------- This patch removes some overhead from :func:`~hypothesis.extra.numpy.arrays` with a constant shape and dtype. The resulting performance improvement is modest, but worthwile for small arrays. .. _v4.7.17: ------------------- 4.7.17 - 2019-03-01 ------------------- This release makes some micro-optimisations within Hypothesis's internal representation of test cases. This should cause heavily nested test cases to allocate less during generation and shrinking, which should speed things up slightly. .. _v4.7.16: ------------------- 4.7.16 - 2019-02-28 ------------------- This changes the order in which Hypothesis runs certain operations during shrinking. This should significantly decrease memory usage and speed up shrinking of large examples. .. _v4.7.15: ------------------- 4.7.15 - 2019-02-28 ------------------- This release allows Hypothesis to calculate a number of attributes of generated test cases lazily. This should significantly reduce memory usage and modestly improve performance, especially for large test cases. .. _v4.7.14: ------------------- 4.7.14 - 2019-02-28 ------------------- This release reduces the number of operations the shrinker will try when reordering parts of a test case. This should in some circumstances significantly speed up shrinking. 
It *may* result in different final test cases, and if so usually slightly worse ones, but it should not generally have much impact on the end result as the operations removed were typically useless. .. _v4.7.13: ------------------- 4.7.13 - 2019-02-27 ------------------- This release changes how Hypothesis reorders examples within a test case during shrinking. This should make shrinking considerably faster. .. _v4.7.12: ------------------- 4.7.12 - 2019-02-27 ------------------- This release slightly improves the shrinker's ability to replace parts of a test case with their minimal version, by allowing it to do so in bulk rather than one at a time. Where this is effective, shrinker performance should be modestly improved. .. _v4.7.11: ------------------- 4.7.11 - 2019-02-25 ------------------- This release makes some micro-optimisations to common operations performed during shrinking. Shrinking should now be slightly faster, especially for large examples with relatively fast test functions. .. _v4.7.10: ------------------- 4.7.10 - 2019-02-25 ------------------- This release is a purely internal refactoring of Hypothesis's API for representing test cases. There should be no user visible effect. .. _v4.7.9: ------------------ 4.7.9 - 2019-02-24 ------------------ This release changes certain shrink passes to make them more efficient when they aren't making progress. .. _v4.7.8: ------------------ 4.7.8 - 2019-02-23 ------------------ This patch removes some unused code, which makes the internals a bit easier to understand. There is no user-visible impact. .. _v4.7.7: ------------------ 4.7.7 - 2019-02-23 ------------------ This release reduces the number of operations the shrinker will try when reordering parts of a test case. This should in some circumstances significantly speed up shrinking. 
It *may* result in different final test cases, and if so usually slightly worse ones, but it should not generally have much impact on the end result as the operations removed were typically useless. .. _v4.7.6: ------------------ 4.7.6 - 2019-02-23 ------------------ This patch removes some unused code from the shrinker. There is no user-visible change. .. _v4.7.5: ------------------ 4.7.5 - 2019-02-23 ------------------ This release changes certain shrink passes to make them *adaptive* - that is, in cases where they are successfully making progress they may now do so significantly faster. .. _v4.7.4: ------------------ 4.7.4 - 2019-02-22 ------------------ This is a docs-only patch, noting that because the :pypi:`lark-parser` is under active development at version 0.x, ``hypothesis[lark]`` APIs may break in minor releases if necessary to keep up with the upstream package. .. _v4.7.3: ------------------ 4.7.3 - 2019-02-22 ------------------ This changes Hypothesis to no longer import various test frameworks by default (if they are installed). which will speed up the initial ``import hypothesis`` call. .. _v4.7.2: ------------------ 4.7.2 - 2019-02-22 ------------------ This release changes Hypothesis's internal representation of a test case to calculate some expensive structural information on demand rather than eagerly. This should reduce memory usage a fair bit, and may make generation somewhat faster. .. _v4.7.1: ------------------ 4.7.1 - 2019-02-21 ------------------ This release refactors the internal representation of previously run test cases. The main thing you should see as a result is that Hypothesis becomes somewhat less memory hungry. .. _v4.7.0: ------------------ 4.7.0 - 2019-02-21 ------------------ This patch allows :func:`~hypothesis.extra.numpy.array_shapes` to generate shapes with side-length or even dimension zero, though the minimum still defaults to one. 
These shapes are rare and have some odd behavior, but are particularly important to test for just that reason! In a related bugfix, :func:`~hypothesis.extra.numpy.arrays` now supports generating zero-dimensional arrays with ``dtype=object`` and a strategy for iterable elements. Previously, the array element would incorrectly be set to the first item in the generated iterable. Thanks to Ryan Turner for continuing to improve our Numpy support. .. _v4.6.1: ------------------ 4.6.1 - 2019-02-19 ------------------ This release is a trivial micro-optimisation inside Hypothesis which should result in it using significantly less memory. .. _v4.6.0: ------------------ 4.6.0 - 2019-02-18 ------------------ This release changes some inconsistent behavior of :func:`~hypothesis.extra.numpy.arrays` from the Numpy extra when asked for an array of ``shape=()``. :func:`~hypothesis.extra.numpy.arrays` will now always return a Numpy :class:`~numpy:numpy.ndarray`, and the array will always be of the requested dtype. Thanks to Ryan Turner for this change. .. _v4.5.12: ------------------- 4.5.12 - 2019-02-18 ------------------- This release fixes a minor typo in an internal comment. There is no user-visible change. .. _v4.5.11: ------------------- 4.5.11 - 2019-02-15 ------------------- This release fixes :issue:`1813`, a bug introduced in :ref:`3.59.1 `, which caused :py:meth:`~hypothesis.strategies.random_module` to no longer affect the body of the test: Although Hypothesis would claim to be seeding the random module in fact tests would always run with a seed of zero. .. _v4.5.10: ------------------- 4.5.10 - 2019-02-14 ------------------- This patch fixes an off-by-one error in the maximum length of :func:`~hypothesis.strategies.emails`. Thanks to Krzysztof Jurewicz for :pull:`1812`. .. _v4.5.9: ------------------ 4.5.9 - 2019-02-14 ------------------ This patch removes some unused code from the shrinker. There is no user-visible change. ..
_v4.5.8: ------------------ 4.5.8 - 2019-02-12 ------------------ This release fixes an internal ``IndexError`` in Hypothesis that could sometimes be triggered during shrinking. .. _v4.5.7: ------------------ 4.5.7 - 2019-02-11 ------------------ This release modifies the shrinker to interleave different types of reduction operations, e.g. switching between deleting data and lowering scalar values rather than trying entirely deletions then entirely lowering. This may slow things down somewhat in the typical case, but has the major advantage that many previously difficult to shrink examples should become much faster, because the shrinker will no longer tend to stall when trying some ineffective changes to the shrink target but will instead interleave it with other more effective operations. .. _v4.5.6: ------------------ 4.5.6 - 2019-02-11 ------------------ This release makes a number of internal changes to the implementation of :func:`hypothesis.extra.lark.from_lark`. These are primarily intended as a refactoring, but you may see some minor improvements to performance when generating large strings, and possibly to shrink quality. .. _v4.5.5: ------------------ 4.5.5 - 2019-02-10 ------------------ This patch prints an explanatory note when :issue:`1798` is triggered, because the error message from Numpy is too terse to locate the problem. .. _v4.5.4: ------------------ 4.5.4 - 2019-02-08 ------------------ In Python 2, ``long`` integers are not allowed in the shape argument to :func:`~hypothesis.extra.numpy.arrays`. Thanks to Ryan Turner for fixing this. .. _v4.5.3: ------------------ 4.5.3 - 2019-02-08 ------------------ This release makes a small internal refactoring to clarify how Hypothesis instructs tests to stop running when appropriate. There is no user-visible change. .. _v4.5.2: ------------------ 4.5.2 - 2019-02-06 ------------------ This release standardises all of the shrinker's internal operations on running in a random order. 
The main effect you will see from this is that it should now be much less common for the shrinker to stall for a long time before making further progress. In some cases this will correspond to shrinking more slowly, but on average it should result in faster shrinking. .. _v4.5.1: ------------------ 4.5.1 - 2019-02-05 ------------------ This patch updates some docstrings, but has no runtime changes. .. _v4.5.0: ------------------ 4.5.0 - 2019-02-03 ------------------ This release adds ``exclude_min`` and ``exclude_max`` arguments to :func:`~hypothesis.strategies.floats`, so that you can easily generate values from `open or half-open intervals `_ (:issue:`1622`). .. _v4.4.6: ------------------ 4.4.6 - 2019-02-03 ------------------ This patch fixes a bug where :func:`~hypothesis.strategies.from_regex` could throw an internal error if the :obj:`python:re.IGNORECASE` flag was used (:issue:`1786`). .. _v4.4.5: ------------------ 4.4.5 - 2019-02-02 ------------------ This release removes two shrink passes that Hypothesis runs late in the process. These were very expensive when the test function was slow and often didn't do anything useful. Shrinking should get faster for most failing tests. If you see any regression in example quality as a result of this release, please let us know. .. _v4.4.4: ------------------ 4.4.4 - 2019-02-02 ------------------ This release modifies the way that Hypothesis deletes data during shrinking. It will primarily be noticeable for very large examples, which should now shrink faster. The shrinker is now also able to perform some deletions that it could not previously, but this is unlikely to be very noticeable. .. _v4.4.3: ------------------ 4.4.3 - 2019-01-25 ------------------ This release fixes an open file leak that used to cause ``ResourceWarning``\ s. .. _v4.4.2: ------------------ 4.4.2 - 2019-01-24 ------------------ This release changes Hypothesis's internal approach to caching the results of executing test cases.
The result should be that it is now significantly less memory hungry, especially when shrinking large test cases. Some tests may get slower or faster depending on whether the new or old caching strategy was well suited to them, but any change in speed in either direction should be minor. .. _v4.4.1: ------------------ 4.4.1 - 2019-01-24 ------------------ This patch tightens up some of our internal heuristics to deal with shrinking floating point numbers, which will now run in fewer circumstances. You are fairly unlikely to see much difference from this, but if you do you are likely to see shrinking become slightly faster and/or producing slightly worse results. .. _v4.4.0: ------------------ 4.4.0 - 2019-01-24 ------------------ This release adds the :func:`~hypothesis.extra.django.from_form` function, which allows automatic testing against Django forms. (:issue:`35`) Thanks to Paul Stiverson for this feature, which resolves our oldest open issue! .. _v4.3.0: ------------------ 4.3.0 - 2019-01-24 ------------------ This release deprecates ``HealthCheck.hung_test`` and disables the associated runtime check for tests that ran for more than five minutes. Such a check is redundant now that we enforce the ``deadline`` and ``max_examples`` setting, which can be adjusted independently. .. _v4.2.0: ------------------ 4.2.0 - 2019-01-23 ------------------ This release adds a new module, ``hypothesis.extra.lark``, which you can use to generate strings matching a context-free grammar. In this initial version, only :pypi:`lark-parser` EBNF grammars are supported, by the new :func:`hypothesis.extra.lark.from_lark` function. .. _v4.1.2: ------------------ 4.1.2 - 2019-01-23 ------------------ This patch fixes a very rare overflow bug (:issue:`1748`) which could raise an ``InvalidArgument`` error in :func:`~hypothesis.strategies.complex_numbers` even though the arguments were valid. .. 
_v4.1.1: ------------------ 4.1.1 - 2019-01-23 ------------------ This release makes some improvements to internal code organisation and documentation and has no impact on behaviour. .. _v4.1.0: ------------------ 4.1.0 - 2019-01-22 ------------------ This release adds :func:`~hypothesis.register_random`, which registers ``random.Random`` instances or compatible objects to be seeded and reset by Hypothesis to ensure that test cases are deterministic. We still recommend explicitly passing a ``random.Random`` instance from :func:`~hypothesis.strategies.randoms` if possible, but registering a framework-global state for Hypothesis to manage is better than flaky tests! .. _v4.0.2: ------------------ 4.0.2 - 2019-01-22 ------------------ This patch fixes :issue:`1387`, where bounded :func:`~hypothesis.strategies.integers` with a very large range would almost always generate very large numbers. Now, we usually use the same tuned distribution as unbounded :func:`~hypothesis.strategies.integers`. .. _v4.0.1: ------------------ 4.0.1 - 2019-01-16 ------------------ This release randomizes the order in which the shrinker tries some of its initial normalization operations. You are unlikely to see much difference as a result unless your generated examples are very large. In this case you may see some performance improvements in shrinking. .. _v4.0.0: ------------------ 4.0.0 - 2019-01-14 ------------------ Welcome to the next major version of Hypothesis! There are no new features here, as we release those in minor versions. Instead, 4.0 is a chance for us to remove deprecated features (many already converted into no-ops), and turn a variety of warnings into errors. If you were running on the last version of Hypothesis 3.x *without any Hypothesis deprecation warnings* (or using private APIs), this will be a very boring upgrade. 
**In fact, nothing will change for you at all.** Per :ref:`our deprecation policy `, warnings added in the last six months (after 2018-07-05) have not been converted to errors. Removals ~~~~~~~~ - ``hypothesis.extra.datetime`` has been removed, replaced by the core date and time strategies. - ``hypothesis.extra.fakefactory`` has been removed, replaced by general expansion of Hypothesis' strategies and the third-party ecosystem. - The SQLite example database backend has been removed. Settings ~~~~~~~~ - The :obj:`~hypothesis.settings.deadline` is now enforced by default, rather than just emitting a warning when the default (200 milliseconds per test case) deadline is exceeded. - The ``database_file`` setting has been removed; use :obj:`~hypothesis.settings.database`. - The ``perform_health_check`` setting has been removed; use :obj:`~hypothesis.settings.suppress_health_check`. - The ``max_shrinks`` setting has been removed; use :obj:`~hypothesis.settings.phases` to disable shrinking. - The ``min_satisfying_examples``, ``max_iterations``, ``strict``, ``timeout``, and ``use_coverage`` settings have been removed without user-configurable replacements. Strategies ~~~~~~~~~~ - The ``elements`` argument is now required for collection strategies. - The ``average_size`` argument was a no-op and has been removed. - Date and time strategies now only accept ``min_value`` and ``max_value`` for bounds. - :func:`~hypothesis.strategies.builds` now requires that the thing to build is passed as the first positional argument. - Alphabet validation for :func:`~hypothesis.strategies.text` raises errors, not warnings, as does category validation for :func:`~hypothesis.strategies.characters`. - The ``choices()`` strategy has been removed. Instead, you can use :func:`~hypothesis.strategies.data` with :func:`~hypothesis.strategies.sampled_from`, so ``choice(elements)`` becomes ``data.draw(sampled_from(elements))``. - The ``streaming()`` strategy has been removed. 
Instead, you can use :func:`~hypothesis.strategies.data` and replace iterating over the stream with ``data.draw()`` calls. - :func:`~hypothesis.strategies.sampled_from` and :func:`~hypothesis.strategies.permutations` raise errors instead of warnings if passed a collection that is not a sequence. Miscellaneous ~~~~~~~~~~~~~ - Applying :func:`@given ` to a test function multiple times was really inefficient, and now it's also an error. - Using the ``.example()`` method of a strategy (intended for interactive exploration) within another strategy or a test function always weakened data generation and broke shrinking, and now it's an error too. - The ``HYPOTHESIS_DATABASE_FILE`` environment variable is no longer supported, as the ``database_file`` setting has been removed. - The ``HYPOTHESIS_VERBOSITY_LEVEL`` environment variable is no longer supported. You can use the ``--hypothesis-verbosity`` pytest argument instead, or write your own setup code using the settings profile system to replace it. - Using :func:`@seed ` or :obj:`derandomize=True ` now forces :obj:`database=None ` to ensure results are in fact reproducible. If :obj:`~hypothesis.settings.database` is *not* ``None``, doing so also emits a ``HypothesisWarning``. - Unused exception types have been removed from ``hypothesis.errors``; namely ``AbnormalExit``, ``BadData``, ``BadTemplateDraw``, ``DefinitelyNoSuchExample``, ``Timeout``, and ``WrongFormat``. .. _v3.88.3: ------------------- 3.88.3 - 2019-01-11 ------------------- This changes the order that the shrinker tries certain operations in its "emergency" phase which runs late in the process. The new order should be better at avoiding long stalls where the shrinker is failing to make progress, which may be helpful if you have difficult to shrink test cases. However this will not be noticeable in the vast majority of use cases. .. 
_v3.88.2: ------------------- 3.88.2 - 2019-01-11 ------------------- This is a pure refactoring release that extracts some logic from the core Hypothesis engine into its own class and file. It should have no user visible impact. .. _v3.88.1: ------------------- 3.88.1 - 2019-01-11 ------------------- This patch fixes some markup in our documentation. .. _v3.88.0: ------------------- 3.88.0 - 2019-01-10 ------------------- Introduces :func:`hypothesis.stateful.multiple`, which allows rules in rule based state machines to send multiple results at once to their target Bundle, or none at all. .. _v3.87.0: ------------------- 3.87.0 - 2019-01-10 ------------------- This release contains a massive cleanup of the Hypothesis for Django extra: - ``hypothesis.extra.django.models.models()`` is deprecated in favor of :func:`hypothesis.extra.django.from_model`. - ``hypothesis.extra.django.models.add_default_field_mapping()`` is deprecated in favor of :func:`hypothesis.extra.django.register_field_strategy`. - :func:`~hypothesis.extra.django.from_model` does not infer a strategy for nullable fields or fields with a default unless passed ``infer``, like :func:`~hypothesis.strategies.builds`. ``models.models()`` would usually but not always infer, and a special ``default_value`` marker object was required to disable inference. .. _v3.86.9: ------------------- 3.86.9 - 2019-01-09 ------------------- This release improves some internal logic about when a test case in Hypothesis's internal representation could lead to a valid test case. In some circumstances this can lead to a significant speed up during shrinking. It may have some minor negative impact on the quality of the final result due to certain shrink passes now having access to less information about test cases in some circumstances, but this should rarely matter. .. _v3.86.8: ------------------- 3.86.8 - 2019-01-09 ------------------- This release has no user visible changes but updates our URLs to use HTTPS. .. 
_v3.86.7: ------------------- 3.86.7 - 2019-01-08 ------------------- Hypothesis can now automatically generate values for Django models with a ``URLField``, thanks to a new provisional strategy for URLs (:issue:`1388`). .. _v3.86.6: ------------------- 3.86.6 - 2019-01-07 ------------------- This release is a pure refactoring that extracts some internal code into its own file. It should have no user visible effect. .. _v3.86.5: ------------------- 3.86.5 - 2019-01-06 ------------------- This is a docs-only patch, which fixes some typos and removes a few hyperlinks for deprecated features. .. _v3.86.4: ------------------- 3.86.4 - 2019-01-04 ------------------- This release changes the order in which the shrinker tries to delete data. For large and slow tests this may significantly improve the performance of shrinking. .. _v3.86.3: ------------------- 3.86.3 - 2019-01-04 ------------------- This release fixes a bug where, in certain places, Hypothesis internal errors could be raised during shrinking when a user exception occurred that suppressed an exception Hypothesis uses internally in its generation. The two known ways to trigger this problem were: * Errors raised in stateful tests' teardown function. * Errors raised in finally blocks that wrapped a call to ``data.draw``. These cases will now be handled correctly. .. _v3.86.2: ------------------- 3.86.2 - 2019-01-04 ------------------- This patch is a docs-only change to fix a broken hyperlink. .. _v3.86.1: ------------------- 3.86.1 - 2019-01-04 ------------------- This patch fixes :issue:`1732`, where :func:`~hypothesis.strategies.integers` would always return ``long`` values on Python 2. .. _v3.86.0: ------------------- 3.86.0 - 2019-01-03 ------------------- This release ensures that infinite numbers are never generated by :func:`~hypothesis.strategies.floats` with ``allow_infinity=False``, which could previously happen in some cases where one bound was also provided.
The trivially inconsistent ``min_value=inf, allow_infinity=False`` now raises an InvalidArgumentError, as does the inverse with ``max_value``. You can still use :func:`just(inf) ` to generate ``inf`` without violating other constraints. .. _v3.85.3: ------------------- 3.85.3 - 2019-01-02 ------------------- Happy new year everyone! This release has no user visible changes but updates our copyright headers to include 2019. .. _v3.85.2: ------------------- 3.85.2 - 2018-12-31 ------------------- This release makes a small change to the way the shrinker works. You may see some improvements to speed of shrinking on especially large and hard to shrink examples, but most users are unlikely to see much difference. .. _v3.85.1: ------------------- 3.85.1 - 2018-12-30 ------------------- This patch fixes :issue:`1700`, where a line that contained a Unicode character before a lambda definition would cause an internal exception. .. _v3.85.0: ------------------- 3.85.0 - 2018-12-29 ------------------- Introduces the :func:`hypothesis.stateful.consumes` function. When defining a rule in stateful testing, it can be used to mark bundles from which values should be consumed, i. e. removed after use in the rule. This has been proposed in :issue:`136`. Thanks to Jochen Müller for this long-awaited feature. .. _v3.84.6: ------------------- 3.84.6 - 2018-12-28 ------------------- This patch makes a small internal change to fix an issue in Hypothesis's own coverage tests (:issue:`1718`). There is no user-visible change. .. _v3.84.5: ------------------- 3.84.5 - 2018-12-21 ------------------- This patch refactors the ``hypothesis.strategies`` module, so that private names should no longer appear in tab-completion lists. We previously relied on ``__all__`` for this, but not all editors respect it. .. _v3.84.4: ------------------- 3.84.4 - 2018-12-21 ------------------- This is a follow-up patch to ensure that the deprecation date is automatically recorded for any new deprecations. 
There is no user-visible effect. .. _v3.84.3: ------------------- 3.84.3 - 2018-12-20 ------------------- This patch updates the Hypothesis pytest plugin to avoid a recently deprecated hook interface. There is no user-visible change. .. _v3.84.2: ------------------- 3.84.2 - 2018-12-19 ------------------- This patch fixes the internals for :func:`~hypothesis.strategies.integers` with one bound. Values from this strategy now always shrink towards zero instead of towards the bound, and should shrink much more efficiently too. On Python 2, providing a bound incorrectly excluded ``long`` integers, which can now be generated. .. _v3.84.1: ------------------- 3.84.1 - 2018-12-18 ------------------- This patch adds information about when features were deprecated, but this is only recorded internally and has no user-visible effect. .. _v3.84.0: ------------------- 3.84.0 - 2018-12-18 ------------------- This release changes the stateful testing backend from ``find()`` to use :func:`@given ` (:issue:`1300`). This doesn't change how you create stateful tests, but does make them run more like other Hypothesis tests. :func:`@reproduce_failure ` and :func:`@seed ` now work for stateful tests. Stateful tests now respect the :attr:`~hypothesis.settings.deadline` and :attr:`~hypothesis.settings.suppress_health_check` settings, though they are disabled by default. You can enable them by using :func:`@settings(...) ` as a class decorator with whatever arguments you prefer. .. _v3.83.2: ------------------- 3.83.2 - 2018-12-17 ------------------- Hypothesis has adopted :pypi:`Black` as our code formatter (:issue:`1686`). There are no functional changes to the source, but it's prettier! .. _v3.83.1: ------------------- 3.83.1 - 2018-12-13 ------------------- This patch increases the variety of examples generated by :func:`~hypothesis.strategies.from_type`. .. 
_v3.83.0: ------------------- 3.83.0 - 2018-12-12 ------------------- Our pytest plugin now warns you when strategy functions have been collected as tests, which may happen when e.g. using the :func:`@composite ` decorator when you should be using ``@given(st.data())`` for inline draws. Such functions *always* pass when treated as tests, because the lazy creation of strategies mean that the function body is never actually executed! .. _v3.82.6: ------------------- 3.82.6 - 2018-12-11 ------------------- Hypothesis can now :ref:`show statistics ` when running under :pypi:`pytest-xdist`. Previously, statistics were only reported when all tests were run in a single process (:issue:`700`). .. _v3.82.5: ------------------- 3.82.5 - 2018-12-08 ------------------- This patch fixes :issue:`1667`, where passing bounds of Numpy dtype ``int64`` to :func:`~hypothesis.strategies.integers` could cause errors on Python 3 due to internal rounding. .. _v3.82.4: ------------------- 3.82.4 - 2018-12-08 ------------------- Hypothesis now seeds and resets the global state of :mod:`np.random ` for each test case, to ensure that tests are reproducible. This matches and complements the existing handling of the :mod:`python:random` module - Numpy simply maintains an independent PRNG for performance reasons. .. _v3.82.3: ------------------- 3.82.3 - 2018-12-08 ------------------- This is a no-op release to add the new ``Framework :: Hypothesis`` `trove classifier `_ to :pypi:`hypothesis` on PyPI. You can `use it as a filter `_ to find Hypothesis-related packages such as extensions as they add the tag over the coming weeks, or simply visit :doc:`our curated list `. .. _v3.82.2: ------------------- 3.82.2 - 2018-12-08 ------------------- The :ref:`Hypothesis for Pandas extension ` is now listed in ``setup.py``, so you can ``pip install hypothesis[pandas]``. Thanks to jmshi for this contribution. .. 
_v3.82.1: ------------------- 3.82.1 - 2018-10-29 ------------------- This patch fixes :func:`~hypothesis.strategies.from_type` on Python 2 for classes where ``cls.__init__ is object.__init__``. Thanks to ccxcz for reporting :issue:`1656`. .. _v3.82.0: ------------------- 3.82.0 - 2018-10-29 ------------------- The ``alphabet`` argument for :func:`~hypothesis.strategies.text` now uses its default value of ``characters(blacklist_categories=('Cs',))`` directly, instead of hiding that behind ``alphabet=None`` and replacing it within the function. Passing ``None`` is therefore deprecated. .. _v3.81.0: ------------------- 3.81.0 - 2018-10-27 ------------------- :class:`~hypothesis.stateful.GenericStateMachine` and :class:`~hypothesis.stateful.RuleBasedStateMachine` now raise an explicit error when instances of :obj:`~hypothesis.settings` are assigned to the classes' settings attribute, which is a no-op (:issue:`1643`). Instead assign to ``SomeStateMachine.TestCase.settings``, or use ``@settings(...)`` as a class decorator to handle this automatically. .. _v3.80.0: ------------------- 3.80.0 - 2018-10-25 ------------------- Since :ref:`version 3.68.0 `, :func:`~hypothesis.extra.numpy.arrays` checks that values drawn from the ``elements`` and ``fill`` strategies can be safely cast to the dtype of the array, and emits a warning otherwise. This release expands the checks to cover overflow for finite ``complex64`` elements and string truncation caused by too-long elements or trailing null characters (:issue:`1591`). .. _v3.79.4: ------------------- 3.79.4 - 2018-10-25 ------------------- Tests using :func:`@given ` now shrink errors raised from :pypi:`pytest` helper functions, instead of reporting the first example found. This was previously fixed in :ref:`version 3.56.0 `, but only for stateful testing. .. 
_v3.79.3: ------------------- 3.79.3 - 2018-10-23 ------------------- Traceback elision is now disabled on Python 2, to avoid an import-time :class:`python:SyntaxError` under Python < 2.7.9 (Python: :bpo:`21591`, :ref:`Hypothesis 3.79.2 `: :issue:`1648`). We encourage all users to `upgrade to Python 3 before the end of 2019 `_. .. _v3.79.2: ------------------- 3.79.2 - 2018-10-23 ------------------- This patch shortens tracebacks from Hypothesis, so you can see exactly what happened in your code without having to skip over irrelevant details about our internals (:issue:`848`). In the example test (see :pull:`1582`), this reduces tracebacks from nine frames to just three - and for a test with multiple errors, from seven frames per error to just one! If you *do* want to see the internal details, you can disable frame elision by setting :obj:`~hypothesis.settings.verbosity` to ``debug``. .. _v3.79.1: ------------------- 3.79.1 - 2018-10-22 ------------------- The abstract number classes :class:`~python:numbers.Number`, :class:`~python:numbers.Complex`, :class:`~python:numbers.Real`, :class:`~python:numbers.Rational`, and :class:`~python:numbers.Integral` are now supported by the :func:`~hypothesis.strategies.from_type` strategy. Previously, you would have to use :func:`~hypothesis.strategies.register_type_strategy` before they could be resolved (:issue:`1636`). .. _v3.79.0: ------------------- 3.79.0 - 2018-10-18 ------------------- This release adds a CLI flag for verbosity ``--hypothesis-verbosity`` to the Hypothesis pytest plugin, applied after loading the profile specified by ``--hypothesis-profile``. Valid options are the names of verbosity settings, quiet, normal, verbose or debug. Thanks to Bex Dunn for writing this patch at the PyCon Australia sprints! The pytest header now correctly reports the current profile if ``--hypothesis-profile`` has been used. Thanks to Mathieu Paturel for the contribution at the Canberra Python Hacktoberfest. ..
_v3.78.0: ------------------- 3.78.0 - 2018-10-16 ------------------- This release has deprecated the generation of integers, floats and fractions when the conversion of the upper and/or lower bound is not 100% exact, e.g. when an integer gets passed a bound that is not a whole number. (:issue:`1625`) Thanks to Felix Grünewald for this patch during Hacktoberfest 2018. .. _v3.77.0: ------------------- 3.77.0 - 2018-10-16 ------------------- This minor release adds functionality to :obj:`~hypothesis.settings` allowing it to be used as a decorator on :obj:`~hypothesis.stateful.RuleBasedStateMachine` and :obj:`~hypothesis.stateful.GenericStateMachine`. Thanks to Tyler Nickerson for this feature in #hacktoberfest! .. _v3.76.1: ------------------- 3.76.1 - 2018-10-16 ------------------- This patch fixes some warnings added by recent releases of :pypi:`pydocstyle` and :pypi:`mypy`. .. _v3.76.0: ------------------- 3.76.0 - 2018-10-11 ------------------- This release deprecates using floats for ``min_size`` and ``max_size``. The type hint for ``average_size`` arguments has been changed from ``Optional[int]`` to None, because non-None values are always ignored and deprecated. .. _v3.75.4: ------------------- 3.75.4 - 2018-10-10 ------------------- This patch adds more internal comments to the core engine's sequence-length shrinker. There should be no user-visible change. .. _v3.75.3: ------------------- 3.75.3 - 2018-10-09 ------------------- This patch adds additional comments to some of the core engine's internal data structures. There is no user-visible change. .. _v3.75.2: ------------------- 3.75.2 - 2018-10-09 ------------------- This patch avoids caching a trivial case, fixing :issue:`493`. .. _v3.75.1: ------------------- 3.75.1 - 2018-10-09 ------------------- This patch fixes a broken link in a docstring. Thanks to Benjamin Lee for this contribution! ..
_v3.75.0: ------------------- 3.75.0 - 2018-10-08 ------------------- This release deprecates the use of ``min_size=None``, setting the default ``min_size`` to 0 (:issue:`1618`). .. _v3.74.3: ------------------- 3.74.3 - 2018-10-08 ------------------- This patch makes some small internal changes to comply with a new lint setting in the build. There should be no user-visible change. .. _v3.74.2: ------------------- 3.74.2 - 2018-10-03 ------------------- This patch fixes :issue:`1153`, where time spent reifying a strategy was also counted in the time spent generating the first example. Strategies are now fully constructed and validated before the timer is started. .. _v3.74.1: ------------------- 3.74.1 - 2018-10-03 ------------------- This patch fixes some broken formatting and links in the documentation. .. _v3.74.0: ------------------- 3.74.0 - 2018-10-01 ------------------- This release checks that the value of the :attr:`~hypothesis.settings.print_blob` setting is a ``PrintSettings`` instance. Being able to specify a boolean value was not intended, and is now deprecated. In addition, specifying ``True`` will now cause the blob to always be printed, instead of causing it to be suppressed. Specifying any value that is not a ``PrintSettings`` or a boolean is now an error. .. _v3.73.5: ------------------- 3.73.5 - 2018-10-01 ------------------- Changes the documentation for ``hypothesis.strategies.datetimes``, ``hypothesis.strategies.dates``, ``hypothesis.strategies.times`` to use the new parameter names ``min_value`` and ``max_value`` instead of the deprecated names .. _v3.73.4: ------------------- 3.73.4 - 2018-09-30 ------------------- This patch ensures that Hypothesis deprecation warnings display the code that emitted them when you're not running in ``-Werror`` mode (:issue:`652`). .. _v3.73.3: ------------------- 3.73.3 - 2018-09-27 ------------------- Tracebacks involving :func:`@composite ` are now slightly shorter due to some internal refactoring. .. 
_v3.73.2: ------------------- 3.73.2 - 2018-09-26 ------------------- This patch fixes errors in the internal comments for one of the shrinker passes. There is no user-visible change. .. _v3.73.1: ------------------- 3.73.1 - 2018-09-25 ------------------- This patch substantially improves the distribution of data generated with :func:`~hypothesis.strategies.recursive`, and fixes a rare internal error (:issue:`1502`). .. _v3.73.0: ------------------- 3.73.0 - 2018-09-24 ------------------- This release adds the :func:`~hypothesis.extra.dpcontracts.fulfill` function, which is designed for testing code that uses :pypi:`dpcontracts` 0.4 or later for input validation. This provides some syntactic sugar around use of :func:`~hypothesis.assume`, to automatically filter out and retry calls that cause a precondition check to fail (:issue:`1474`). .. _v3.72.0: ------------------- 3.72.0 - 2018-09-24 ------------------- This release makes setting attributes of the :class:`hypothesis.settings` class an explicit error. This has never had any effect, but could mislead users who confused it with the current settings *instance* ``hypothesis.settings.default`` (which is also immutable). You can change the global settings with :ref:`settings profiles `. .. _v3.71.11: -------------------- 3.71.11 - 2018-09-24 -------------------- This patch factors out some common code in the shrinker for iterating over pairs of data blocks. There should be no user-visible change. .. _v3.71.10: -------------------- 3.71.10 - 2018-09-18 -------------------- This patch allows :func:`~hypothesis.strategies.from_type` to handle the empty tuple type, :obj:`typing.Tuple[()] `. .. _v3.71.9: ------------------- 3.71.9 - 2018-09-17 ------------------- This patch updates some internal comments for :pypi:`mypy`. There is no user-visible effect, even for Mypy users. .. 
_v3.71.8: ------------------- 3.71.8 - 2018-09-17 ------------------- This patch fixes a rare bug that would cause a particular shrinker pass to raise an IndexError, if a shrink improvement changed the underlying data in an unexpected way. .. _v3.71.7: ------------------- 3.71.7 - 2018-09-17 ------------------- This release fixes the broken cross-references in our docs, and adds a CI check so we don't add new ones. .. _v3.71.6: ------------------- 3.71.6 - 2018-09-16 ------------------- This patch fixes two bugs (:issue:`944` and :issue:`1521`), where messages about :func:`@seed ` did not check the current verbosity setting, and the wrong settings were active while executing :ref:`explicit examples `. .. _v3.71.5: ------------------- 3.71.5 - 2018-09-15 ------------------- This patch fixes a ``DeprecationWarning`` added in Python 3.8 (:issue:`1576`). Thanks to tirkarthi for this contribution! .. _v3.71.4: ------------------- 3.71.4 - 2018-09-14 ------------------- This is a no-op release, which implements automatic DOI minting and code archival of Hypothesis via `Zenodo `_. Thanks to CERN and the EU *Horizon 2020* programme for providing this service! Check our :gh-file:`CITATION` file for details, or head right on over to `doi.org/10.5281/zenodo.1412597 `_ .. _v3.71.3: ------------------- 3.71.3 - 2018-09-10 ------------------- This release adds the test name to some deprecation warnings, for easier debugging. Thanks to Sanyam Khurana for the patch! .. _v3.71.2: ------------------- 3.71.2 - 2018-09-10 ------------------- This release makes Hypothesis's memory usage substantially smaller for tests with many examples, by bounding the number of past examples it keeps around. You will not see much difference unless you are running tests with :obj:`~hypothesis.settings.max_examples` set to well over ``1000``, but if you do have such tests then you should see memory usage mostly plateau where previously it would have grown linearly with time. .. 
_v3.71.1: ------------------- 3.71.1 - 2018-09-09 ------------------- This patch adds internal comments to some tree traversals in the core engine. There is no user-visible change. .. _v3.71.0: ------------------- 3.71.0 - 2018-09-08 ------------------- This release deprecates the coverage-guided testing functionality, as it has proven brittle and does not really pull its weight. We intend to replace it with something more useful in the future, but the feature in its current form does not seem to be worth the cost of using, and whatever replaces it will likely look very different. .. _v3.70.4: ------------------- 3.70.4 - 2018-09-08 ------------------- This patch changes the behaviour of :func:`~hypothesis.reproduce_failure` so that blobs are only printed in quiet mode when the :obj:`~hypothesis.settings.print_blob` setting is set to ``ALWAYS``. Thanks to Cameron McGill for writing this patch at the PyCon Australia sprints! .. _v3.70.3: ------------------- 3.70.3 - 2018-09-03 ------------------- This patch removes some unnecessary code from the internals. There is no user-visible change. .. _v3.70.2: ------------------- 3.70.2 - 2018-09-03 ------------------- This patch fixes an internal bug where a corrupted argument to :func:`@reproduce_failure ` could raise the wrong type of error. Thanks again to Paweł T. Jochym, who maintains Hypothesis on `conda-forge `_ and consistently provides excellent bug reports including :issue:`1558`. .. _v3.70.1: ------------------- 3.70.1 - 2018-09-03 ------------------- This patch updates hypothesis to report its version and settings when run with pytest. (:issue:`1223`). Thanks to Jack Massey for this feature. .. _v3.70.0: ------------------- 3.70.0 - 2018-09-01 ------------------- This release adds a ``fullmatch`` argument to :func:`~hypothesis.strategies.from_regex`. When ``fullmatch=True``, the whole example will match the regex pattern as for :func:`python:re.fullmatch`. 
Thanks to Jakub Nabaglo for writing this patch at the PyCon Australia sprints! .. _v3.69.12: -------------------- 3.69.12 - 2018-08-30 -------------------- This release reverts the changes to logging handling in 3.69.11, which broke tests that use the :pypi:`pytest` ``caplog`` fixture internally because all logging was disabled (:issue:`1546`). .. _v3.69.11: -------------------- 3.69.11 - 2018-08-29 -------------------- This patch will hide all logging messages produced by test cases before the final, minimal, failing test case (:issue:`356`). Thanks to Gary Donovan for writing this patch at the PyCon Australia sprints! .. _v3.69.10: -------------------- 3.69.10 - 2018-08-29 -------------------- This patch fixes a bug that prevents coverage from reporting unexecuted Python files (:issue:`1085`). Thanks to Gary Donovan for writing this patch at the PyCon Australia sprints! .. _v3.69.9: ------------------- 3.69.9 - 2018-08-28 ------------------- This patch improves the packaging of the Python package by adding ``LICENSE.txt`` to the sdist (:issue:`1311`), clarifying the minimum supported versions of :pypi:`pytz` and :pypi:`dateutil ` (:issue:`1383`), and adds keywords to the metadata (:issue:`1520`). Thanks to Graham Williamson for writing this patch at the PyCon Australia sprints! .. _v3.69.8: ------------------- 3.69.8 - 2018-08-28 ------------------- This is an internal change which replaces pickle with json to prevent possible security issues. Thanks to Vidya Rani D G for writing this patch at the PyCon Australia sprints! .. _v3.69.7: ------------------- 3.69.7 - 2018-08-28 ------------------- This patch ensures that :func:`~hypothesis.note` prints the note for every test case when the :obj:`~hypothesis.settings.verbosity` setting is ``Verbosity.verbose``. At normal verbosity it only prints from the final test case. Thanks to Tom McDermott for writing this patch at the PyCon Australia sprints! ..
_v3.69.6: ------------------- 3.69.6 - 2018-08-27 ------------------- This patch improves the testing of some internal caching. It should have no user-visible effect. .. _v3.69.5: ------------------- 3.69.5 - 2018-08-27 ------------------- This change performs a small rename and refactoring in the core engine. There is no user-visible change. .. _v3.69.4: ------------------- 3.69.4 - 2018-08-27 ------------------- This change improves the core engine's ability to avoid unnecessary work, by consulting its cache of previously-tried inputs in more cases. .. _v3.69.3: ------------------- 3.69.3 - 2018-08-27 ------------------- This patch handles passing an empty :class:`python:enum.Enum` to :func:`~hypothesis.strategies.from_type` by returning :func:`~hypothesis.strategies.nothing`, instead of raising an internal :class:`python:AssertionError`. Thanks to Paul Amazona for writing this patch at the PyCon Australia sprints! .. _v3.69.2: ------------------- 3.69.2 - 2018-08-23 ------------------- This patch fixes a small mistake in an internal comment. There is no user-visible change. .. _v3.69.1: ------------------- 3.69.1 - 2018-08-21 ------------------- This change fixes a small bug in how the core engine consults its cache of previously-tried inputs. There is unlikely to be any user-visible change. .. _v3.69.0: ------------------- 3.69.0 - 2018-08-20 ------------------- This release improves argument validation for stateful testing. - If the target or targets of a :func:`~hypothesis.stateful.rule` are invalid, we now raise a useful validation error rather than an internal exception. - Passing both the ``target`` and ``targets`` arguments is deprecated - append the ``target`` bundle to the ``targets`` tuple of bundles instead. - Passing the name of a Bundle rather than the Bundle itself is also deprecated. .. _v3.68.3: ------------------- 3.68.3 - 2018-08-20 ------------------- This is a docs-only patch, fixing some typos and formatting issues. .. 
_v3.68.2: ------------------- 3.68.2 - 2018-08-19 ------------------- This change fixes a small bug in how the core engine caches the results of previously-tried inputs. The effect is unlikely to be noticeable, but it might avoid unnecessary work in some cases. .. _v3.68.1: ------------------- 3.68.1 - 2018-08-18 ------------------- This patch documents the :func:`~hypothesis.extra.numpy.from_dtype` function, which infers a strategy for :class:`numpy:numpy.dtype`\ s. This is used in :func:`~hypothesis.extra.numpy.arrays`, but can also be used directly when creating e.g. Pandas objects. .. _v3.68.0: ------------------- 3.68.0 - 2018-08-15 ------------------- :func:`~hypothesis.extra.numpy.arrays` now checks that integer and float values drawn from ``elements`` and ``fill`` strategies can be safely cast to the dtype of the array, and emits a warning otherwise (:issue:`1385`). Elements in the resulting array could previously violate constraints on the elements strategy due to floating-point overflow or truncation of integers to fit smaller types. .. _v3.67.1: ------------------- 3.67.1 - 2018-08-14 ------------------- This release contains a tiny refactoring of the internals. There is no user-visible change. .. _v3.67.0: ------------------- 3.67.0 - 2018-08-10 ------------------- This release adds a ``width`` argument to :func:`~hypothesis.strategies.floats`, to generate lower-precision floating point numbers for e.g. Numpy arrays. The generated examples are always instances of Python's native ``float`` type, which is 64bit, but passing ``width=32`` will ensure that all values can be exactly represented as 32bit floats. This can be useful to avoid overflow (to +/- infinity), and for efficiency of generation and shrinking. Half-precision floats (``width=16``) are also supported, but require Numpy if you are running Python 3.5 or earlier. ..
_v3.66.33: -------------------- 3.66.33 - 2018-08-10 -------------------- This release fixes a bug in :func:`~hypothesis.strategies.floats`, where setting ``allow_infinity=False`` and exactly one of ``min_value`` and ``max_value`` would allow infinite values to be generated. .. _v3.66.32: -------------------- 3.66.32 - 2018-08-09 -------------------- This release adds type hints to the :func:`~hypothesis.example` and :func:`~hypothesis.seed` decorators, and fixes the type hint on :func:`~hypothesis.strategies.register_type_strategy`. The second argument to :func:`~hypothesis.strategies.register_type_strategy` must either be a ``SearchStrategy``, or a callable which takes a ``type`` and returns a ``SearchStrategy``. .. _v3.66.31: -------------------- 3.66.31 - 2018-08-08 -------------------- Another set of changes designed to improve the performance of shrinking on large examples. In particular the shrinker should now spend considerably less time running useless shrinks. .. _v3.66.30: -------------------- 3.66.30 - 2018-08-06 -------------------- "Bug fixes and performance improvements". This release is a fairly major overhaul of the shrinker designed to improve its behaviour on large examples, especially around stateful testing. You should hopefully see shrinking become much faster, with little to no quality degradation (in some cases quality may even improve). .. _v3.66.29: -------------------- 3.66.29 - 2018-08-05 -------------------- This release fixes two very minor bugs in the core engine: * it fixes a corner case that was missing in :ref:`3.66.28 `, which should cause shrinking to work slightly better. * it fixes some logic for how shrinking interacts with the database that was causing Hypothesis to be insufficiently aggressive about clearing out old keys. .. _v3.66.28: -------------------- 3.66.28 - 2018-08-05 -------------------- This release improves how Hypothesis handles reducing the size of integers' representation. 
This change should mostly be invisible as it's purely about the underlying representation and not the generated value, but it may result in some improvements to shrink performance. .. _v3.66.27: -------------------- 3.66.27 - 2018-08-05 -------------------- This release changes the order in which Hypothesis chooses parts of the test case to shrink. For typical usage this should be a significant performance improvement on large examples. It is unlikely to have a major impact on example quality, but where it does change the result it should usually be an improvement. .. _v3.66.26: -------------------- 3.66.26 - 2018-08-05 -------------------- This release improves the debugging information that the shrinker emits about the operations it performs, giving better summary statistics about which passes resulted in test executions and whether they were successful. .. _v3.66.25: -------------------- 3.66.25 - 2018-08-05 -------------------- This release fixes several bugs that were introduced to the shrinker in :ref:`3.66.24 ` which would have caused it to behave significantly less well than advertised. With any luck you should *actually* see the promised benefits now. .. _v3.66.24: -------------------- 3.66.24 - 2018-08-03 -------------------- This release changes how Hypothesis deletes data when shrinking in order to better handle deletion of large numbers of contiguous sequences. Most tests should see little change, but this will hopefully provide a significant speed up for :doc:`stateful testing `. .. _v3.66.23: -------------------- 3.66.23 - 2018-08-02 -------------------- This release makes some internal changes to enable further improvements to the shrinker. You may see some changes in the final shrunk examples, but they are unlikely to be significant. .. _v3.66.22: -------------------- 3.66.22 - 2018-08-01 -------------------- This release adds some more internal caching to the shrinker. 
This should cause a significant speed up for shrinking, especially for stateful testing and large example sizes. .. _v3.66.21: -------------------- 3.66.21 - 2018-08-01 -------------------- This patch is for downstream packagers - our tests now pass under :pypi:`pytest` 3.7.0 (released 2018-07-30). There are no changes to the source of Hypothesis itself. .. _v3.66.20: -------------------- 3.66.20 - 2018-08-01 -------------------- This release removes some functionality from the shrinker that was taking a considerable amount of time and does not appear to be useful any more due to a number of quality improvements in the shrinker. You may see some degradation in shrink quality as a result of this, but mostly shrinking should just get much faster. .. _v3.66.19: -------------------- 3.66.19 - 2018-08-01 -------------------- This release slightly changes the format of some debugging information emitted during shrinking, and refactors some of the internal interfaces around that. .. _v3.66.18: -------------------- 3.66.18 - 2018-07-31 -------------------- This release is a very small internal refactoring which should have no user visible impact. .. _v3.66.17: -------------------- 3.66.17 - 2018-07-31 -------------------- This release fixes a bug that could cause an ``IndexError`` to be raised from inside Hypothesis during shrinking. It is likely that it was impossible to trigger this bug in practice - it was only made visible by some currently unreleased work. .. _v3.66.16: -------------------- 3.66.16 - 2018-07-31 -------------------- This release is a very small internal refactoring which should have no user visible impact. .. _v3.66.15: -------------------- 3.66.15 - 2018-07-31 -------------------- This release makes Hypothesis's shrinking faster by removing some redundant work that it does when minimizing values in its internal representation. .. 
_v3.66.14: -------------------- 3.66.14 - 2018-07-30 -------------------- This release expands the deprecation of timeout from :ref:`3.16.0 ` to also emit the deprecation warning in ``find`` or :doc:`stateful testing `. .. _v3.66.13: -------------------- 3.66.13 - 2018-07-30 -------------------- This release adds an additional shrink pass that is able to reduce the size of examples in some cases where the transformation is non-obvious. In particular this will improve the quality of some examples which would have regressed in :ref:`3.66.12 `. .. _v3.66.12: -------------------- 3.66.12 - 2018-07-28 -------------------- This release changes how we group data together for shrinking. It should result in improved shrinker performance, especially in stateful testing. .. _v3.66.11: -------------------- 3.66.11 - 2018-07-28 -------------------- This patch modifies how which rule to run is selected during :doc:`rule based stateful testing `. This should result in a slight performance increase during generation and a significant performance and quality improvement when shrinking. As a result of this change, some state machines which would previously have thrown an ``InvalidDefinition`` are no longer detected as invalid. .. _v3.66.10: -------------------- 3.66.10 - 2018-07-28 -------------------- This release weakens some minor functionality in the shrinker that had only modest benefit and made its behaviour much harder to reason about. This is unlikely to have much user visible effect, but it is possible that in some cases shrinking may get slightly slower. It is primarily to make it easier to work on the shrinker and pave the way for future work. .. _v3.66.9: ------------------- 3.66.9 - 2018-07-26 ------------------- This release improves the information that Hypothesis emits about its shrinking when :obj:`~hypothesis.settings.verbosity` is set to debug. .. 
_v3.66.8: ------------------- 3.66.8 - 2018-07-24 ------------------- This patch includes some minor fixes in the documentation, and updates the minimum version of :pypi:`pytest` to 3.0 (released August 2016). .. _v3.66.7: ------------------- 3.66.7 - 2018-07-24 ------------------- This release fixes a bug where difficult to shrink tests could sometimes trigger an internal assertion error inside the shrinker. .. _v3.66.6: ------------------- 3.66.6 - 2018-07-23 ------------------- This patch ensures that Hypothesis fully supports Python 3.7, by upgrading :func:`~hypothesis.strategies.from_type` (:issue:`1264`) and fixing some minor issues in our test suite (:issue:`1148`). .. _v3.66.5: ------------------- 3.66.5 - 2018-07-22 ------------------- This patch fixes the online docs for various extras, by ensuring that their dependencies are installed on readthedocs.io (:issue:`1326`). .. _v3.66.4: ------------------- 3.66.4 - 2018-07-20 ------------------- This release improves the shrinker's ability to reorder examples. For example, consider the following test: .. code-block:: python import hypothesis.strategies as st from hypothesis import given @given(st.text(), st.text()) def test_non_equal(x, y): assert x != y Previously this could have failed with either of ``x="", y="0"`` or ``x="0", y=""``. Now it should always fail with ``x="", y="0"``. This will allow the shrinker to produce more consistent results, especially in cases where test cases contain some ordered collection whose actual order does not matter. .. _v3.66.3: ------------------- 3.66.3 - 2018-07-20 ------------------- This patch fixes inference in the :func:`~hypothesis.strategies.builds` strategy with subtypes of :class:`python:typing.NamedTuple`, where the ``__init__`` method is not useful for introspection. We now use the field types instead - thanks to James Uther for identifying this bug. .. 
_v3.66.2: ------------------- 3.66.2 - 2018-07-19 ------------------- This release improves the shrinker's ability to handle situations where there is an additive constraint between two values. For example, consider the following test: .. code-block:: python import hypothesis.strategies as st from hypothesis import given @given(st.integers(), st.integers()) def test_does_not_exceed_100(m, n): assert m + n < 100 Previously this could have failed with almost any pair ``(m, n)`` with ``0 <= m <= n`` and ``m + n == 100``. Now it should almost always fail with ``m=0, n=100``. This is a relatively niche specialisation, but can be useful in situations where e.g. a bug is triggered by an integer overflow. .. _v3.66.1: ------------------- 3.66.1 - 2018-07-09 ------------------- This patch fixes a rare bug where an incorrect percentage drawtime could be displayed for a test, when the system clock was changed during a test running under Python 2 (we use :func:`python:time.monotonic` where it is available to avoid such problems). It also fixes a possible zero-division error that can occur when the underlying C library double-rounds an intermediate value in :func:`python:math.fsum` and gets the least significant bit wrong. .. _v3.66.0: ------------------- 3.66.0 - 2018-07-05 ------------------- This release improves validation of the ``alphabet`` argument to the :func:`~hypothesis.strategies.text` strategy. The following misuses are now deprecated, and will be an error in a future version: - passing an unordered collection (such as ``set('abc')``), which violates invariants about shrinking and reproducibility - passing an alphabet sequence with elements that are not strings - passing an alphabet sequence with elements that are not of length one, which violates any size constraints that may apply Thanks to Sushobhit for adding these warnings (:issue:`1329`). .. 
_v3.65.3: ------------------- 3.65.3 - 2018-07-04 ------------------- This release fixes a mostly theoretical bug where certain usage of the internal API could trigger an assertion error inside Hypothesis. It is unlikely that this problem is even possible to trigger through the public API. .. _v3.65.2: ------------------- 3.65.2 - 2018-07-04 ------------------- This release fixes dependency information for coverage. Previously Hypothesis would allow installing :pypi:`coverage` with any version, but it only works with coverage 4.0 or later. We now specify the correct metadata in our ``setup.py``, so Hypothesis will only allow installation with compatible versions of coverage. .. _v3.65.1: ------------------- 3.65.1 - 2018-07-03 ------------------- This patch ensures that :doc:`stateful tests ` which raise an error from a :pypi:`pytest` helper still print the sequence of steps taken to reach that point (:issue:`1372`). This reporting was previously broken because the helpers inherit directly from :class:`python:BaseException`, and therefore require special handling to catch without breaking e.g. the use of ctrl-C to quit the test. .. _v3.65.0: ------------------- 3.65.0 - 2018-06-30 ------------------- This release deprecates the ``max_shrinks`` setting in favor of an internal heuristic. If you need to avoid shrinking examples, use the :obj:`~hypothesis.settings.phases` setting instead. (:issue:`1235`) .. _v3.64.2: ------------------- 3.64.2 - 2018-06-27 ------------------- This release fixes a bug where an internal assertion error could sometimes be triggered while shrinking a failing test. .. _v3.64.1: ------------------- 3.64.1 - 2018-06-27 ------------------- This patch fixes type-checking errors in our vendored pretty-printer, which were ignored by our mypy config but visible for anyone else (whoops). Thanks to Pi Delport for reporting :issue:`1359` so promptly. .. 
_v3.64.0: ------------------- 3.64.0 - 2018-06-26 ------------------- This release adds :ref:`an interface ` which can be used to insert a wrapper between the original test function and :func:`@given ` (:issue:`1257`). This will be particularly useful for test runner extensions such as :pypi:`pytest-trio`, but is not recommended for direct use by other users of Hypothesis. .. _v3.63.0: ------------------- 3.63.0 - 2018-06-26 ------------------- This release adds a new mechanism to infer strategies for classes defined using :pypi:`attrs`, based on the type, converter, or validator of each attribute. This inference is now built in to :func:`~hypothesis.strategies.builds` and :func:`~hypothesis.strategies.from_type`. On Python 2, :func:`~hypothesis.strategies.from_type` no longer generates instances of ``int`` when passed ``long``, or vice-versa. .. _v3.62.0: ------------------- 3.62.0 - 2018-06-26 ------------------- This release adds :PEP:`484` type hints to Hypothesis on a provisional basis, using the comment-based syntax for Python 2 compatibility. You can :ref:`read more about our type hints here `. It *also* adds the ``py.typed`` marker specified in :PEP:`561`. After you ``pip install hypothesis``, :pypi:`mypy` 0.590 or later will therefore type-check your use of our public interface! .. _v3.61.0: ------------------- 3.61.0 - 2018-06-24 ------------------- This release deprecates the use of :class:`~hypothesis.settings` as a context manager, the use of which is somewhat ambiguous. Users should define settings with global state or with the :func:`@settings(...) ` decorator. .. _v3.60.1: ------------------- 3.60.1 - 2018-06-20 ------------------- Fixed a bug in generating an instance of a Django model from a strategy where the primary key is generated as part of the strategy. See :ref:`details here `. Thanks to Tim Martin for this contribution. ..
_v3.60.0: ------------------- 3.60.0 - 2018-06-20 ------------------- This release adds the :func:`@initialize ` decorator for stateful testing (originally discussed in :issue:`1216`). All :func:`@initialize ` rules will be called once each in an arbitrary order before any normal rule is called. .. _v3.59.3: ------------------- 3.59.3 - 2018-06-19 ------------------- This is a no-op release to take into account some changes to the release process. It should have no user visible effect. .. _v3.59.2: ------------------- 3.59.2 - 2018-06-18 ------------------- This adds support for partially sorting examples which cannot be fully sorted. For example, [5, 4, 3, 2, 1, 0] with a constraint that the first element needs to be larger than the last becomes [1, 2, 3, 4, 5, 0]. Thanks to Luke for contributing. .. _v3.59.1: ------------------- 3.59.1 - 2018-06-16 ------------------- This patch uses :func:`python:random.getstate` and :func:`python:random.setstate` to restore the PRNG state after :func:`@given ` runs deterministic tests. Without restoring state, you might have noticed problems such as :issue:`1266`. The fix also applies to stateful testing (:issue:`702`). .. _v3.59.0: ------------------- 3.59.0 - 2018-06-14 ------------------- This release adds the :func:`~hypothesis.strategies.emails` strategy, which generates unicode strings representing an email address. Thanks to Sushobhit for moving this to the public API (:issue:`162`). .. _v3.58.1: ------------------- 3.58.1 - 2018-06-13 ------------------- This improves the shrinker. It can now reorder examples: 3 1 2 becomes 1 2 3. Thanks to Luke for contributing. .. _v3.58.0: ------------------- 3.58.0 - 2018-06-13 ------------------- This adds a new extra :py:func:`~hypothesis.extra.dateutil.timezones` strategy that generates :pypi:`dateutil timezones `. Thanks to Conrad for contributing. .. 
_v3.57.0: ------------------- 3.57.0 - 2018-05-20 ------------------- Using an unordered collection with the :func:`~hypothesis.strategies.permutations` strategy has been deprecated because the order in which e.g. a set shrinks is arbitrary. This may cause different results between runs. .. _v3.56.10: -------------------- 3.56.10 - 2018-05-16 -------------------- This release makes ``hypothesis.settings.define_setting`` a private method, which has the effect of hiding it from the documentation. .. _v3.56.9: ------------------- 3.56.9 - 2018-05-11 ------------------- This is another release with no functionality changes as part of changes to Hypothesis's new release tagging scheme. .. _v3.56.8: ------------------- 3.56.8 - 2018-05-10 ------------------- This is a release with no functionality changes that moves Hypothesis over to a new release tagging scheme. .. _v3.56.7: ------------------- 3.56.7 - 2018-05-10 ------------------- This release provides a performance improvement for most tests, but in particular users of :func:`~hypothesis.strategies.sampled_from` who don't have numpy installed should see a significant performance improvement. .. _v3.56.6: ------------------- 3.56.6 - 2018-05-09 ------------------- This patch contains further internal work to support Mypy. There are no user-visible changes... yet. .. _v3.56.5: ------------------- 3.56.5 - 2018-04-22 ------------------- This patch contains some internal refactoring to run :pypi:`mypy` in CI. There are no user-visible changes. .. _v3.56.4: ------------------- 3.56.4 - 2018-04-21 ------------------- This release involves some very minor internal clean up and should have no user visible effect at all. .. _v3.56.3: ------------------- 3.56.3 - 2018-04-20 ------------------- This release fixes a problem introduced in :ref:`3.56.0 ` where setting the hypothesis home directory (through currently undocumented means) would no longer result in the default database location living in the new home directory. .. 
_v3.56.2: ------------------- 3.56.2 - 2018-04-20 ------------------- This release fixes a problem introduced in :ref:`3.56.0 ` where setting :obj:`~hypothesis.settings.max_examples` to ``1`` would result in tests failing with ``Unsatisfiable``. This problem could also occur in other harder to trigger circumstances (e.g. by setting it to a low value, having a hard to satisfy assumption, and disabling health checks). .. _v3.56.1: ------------------- 3.56.1 - 2018-04-20 ------------------- This release fixes a problem that was introduced in :ref:`3.56.0 `: Use of the ``HYPOTHESIS_VERBOSITY_LEVEL`` environment variable was, rather than deprecated, actually broken due to being read before various setup the deprecation path needed was done. It now works correctly (and emits a deprecation warning). .. _v3.56.0: ------------------- 3.56.0 - 2018-04-17 ------------------- This release deprecates several redundant or internally oriented :class:`~hypothesis.settings`, working towards an orthogonal set of configuration options that are widely useful *without* requiring any knowledge of our internals (:issue:`535`). - Deprecated settings that no longer have any effect are no longer shown in the ``__repr__`` unless set to a non-default value. - ``hypothesis.settings.perform_health_check`` is deprecated, as it duplicates :obj:`~hypothesis.settings.suppress_health_check`. - ``hypothesis.settings.max_iterations`` is deprecated and disabled, because we can usually get better behaviour from an internal heuristic than a user-controlled setting. - ``hypothesis.settings.min_satisfying_examples`` is deprecated and disabled, due to overlap with the :obj:`~hypothesis.HealthCheck.filter_too_much` healthcheck and poor interaction with :obj:`~hypothesis.settings.max_examples`. - ``HYPOTHESIS_VERBOSITY_LEVEL`` is now deprecated. Set :obj:`~hypothesis.settings.verbosity` through the profile system instead. 
- Examples tried by ``find()`` are now reported at ``debug`` verbosity level (as well as ``verbose`` level). .. _v3.55.6: ------------------- 3.55.6 - 2018-04-14 ------------------- This release fixes a somewhat obscure condition (:issue:`1230`) under which you could occasionally see a failing test trigger an assertion error inside Hypothesis instead of failing normally. .. _v3.55.5: ------------------- 3.55.5 - 2018-04-14 ------------------- This patch fixes one possible cause of :issue:`966`. When running Python 2 with hash randomisation, passing a :obj:`python:bytes` object to :func:`python:random.seed` would use ``version=1``, which broke :obj:`~hypothesis.settings.derandomize` (because the seed depended on a randomised hash). If :obj:`~hypothesis.settings.derandomize` is *still* nondeterministic for you, please open an issue. .. _v3.55.4: ------------------- 3.55.4 - 2018-04-13 ------------------- This patch makes a variety of minor improvements to the documentation, and improves a few validation messages for invalid inputs. .. _v3.55.3: ------------------- 3.55.3 - 2018-04-12 ------------------- This release updates the URL metadata associated with the PyPI package (again). It has no other user visible effects. .. _v3.55.2: ------------------- 3.55.2 - 2018-04-11 ------------------- This release updates the URL metadata associated with the PyPI package. It has no other user visible effects. .. _v3.55.1: ------------------- 3.55.1 - 2018-04-06 ------------------- This patch relaxes constraints in our tests on the expected values returned by the standard library function :func:`~python:math.hypot` and the internal helper function ``cathetus``, to fix near-exact test failures on some 32-bit systems used by downstream packagers. .. _v3.55.0: ------------------- 3.55.0 - 2018-04-05 ------------------- This release includes several improvements to the handling of the :obj:`~hypothesis.settings.database` setting. 
- The ``database_file`` setting was a historical artefact, and you should just use :obj:`~hypothesis.settings.database` directly. - The ``HYPOTHESIS_DATABASE_FILE`` environment variable is deprecated, in favor of :meth:`~hypothesis.settings.load_profile` and the :obj:`~hypothesis.settings.database` setting. - If you have not configured the example database at all and the default location is not usable (due to e.g. permissions issues), Hypothesis will fall back to an in-memory database. This is not persisted between sessions, but means that the defaults work on read-only filesystems. .. _v3.54.0: ------------------- 3.54.0 - 2018-04-04 ------------------- This release improves the :func:`~hypothesis.strategies.complex_numbers` strategy, which now supports ``min_magnitude`` and ``max_magnitude`` arguments, along with ``allow_nan`` and ``allow_infinity`` like for :func:`~hypothesis.strategies.floats`. Thanks to J.J. Green for this feature. .. _v3.53.0: ------------------- 3.53.0 - 2018-04-01 ------------------- This release removes support for Django 1.8, which reached end of life on 2018-04-01. You can see Django's release and support schedule `on the Django Project website `_. .. _v3.52.3: ------------------- 3.52.3 - 2018-04-01 ------------------- This patch fixes the ``min_satisfying_examples`` settings documentation, by explaining that example shrinking is tracked at the level of the underlying bytestream rather than the output value. The output from ``find()`` in verbose mode has also been adjusted - see :ref:`the example session ` - to avoid duplicating lines when the example repr is constant, even if the underlying representation has been shrunken. .. _v3.52.2: ------------------- 3.52.2 - 2018-03-30 ------------------- This release improves the output of failures with :ref:`rule based stateful testing ` in two ways: * The output from it is now usually valid Python code. 
* When the same value has two different names because it belongs to two different bundles, it will now display with the name associated with the correct bundle for a rule argument where it is used. .. _v3.52.1: ------------------- 3.52.1 - 2018-03-29 ------------------- This release improves the behaviour of :doc:`stateful testing ` in two ways: * Previously some runs would run no steps (:issue:`376`). This should no longer happen. * RuleBasedStateMachine tests which used bundles extensively would often shrink terribly. This should now be significantly improved, though there is likely a lot more room for improvement. This release also involves a low level change to how ranges of integers are handled which may result in other improvements to shrink quality in some cases. .. _v3.52.0: ------------------- 3.52.0 - 2018-03-24 ------------------- This release deprecates use of :func:`@settings(...) ` as a decorator, on functions or methods that are not also decorated with :func:`@given `. You can still apply these decorators in any order, though you should only do so once each. Applying :func:`@given ` twice was already deprecated, and applying :func:`@settings(...) ` twice is deprecated in this release and will become an error in a future version. Neither could ever be used twice to good effect. Using :func:`@settings(...) ` as the sole decorator on a test is completely pointless, so this common usage error will become an error in a future version of Hypothesis. .. _v3.51.0: ------------------- 3.51.0 - 2018-03-24 ------------------- This release deprecates the ``average_size`` argument to :func:`~hypothesis.strategies.lists` and other collection strategies. You should simply delete it wherever it was used in your tests, as it no longer has any effect. In early versions of Hypothesis, the ``average_size`` argument was treated as a hint about the distribution of examples from a strategy.
Subsequent improvements to the conceptual model and the engine for generating and shrinking examples mean it is more effective to simply describe what constitutes a valid example, and let our internals handle the distribution. .. _v3.50.3: ------------------- 3.50.3 - 2018-03-24 ------------------- This patch contains some internal refactoring so that we can run with warnings as errors in CI. .. _v3.50.2: ------------------- 3.50.2 - 2018-03-20 ------------------- This has no user-visible changes except one slight formatting change to one docstring, to avoid a deprecation warning. .. _v3.50.1: ------------------- 3.50.1 - 2018-03-20 ------------------- This patch fixes an internal error introduced in :ref:`3.48.0 `, where a check for the Django test runner would expose import-time errors in Django configuration (:issue:`1167`). .. _v3.50.0: ------------------- 3.50.0 - 2018-03-19 ------------------- This release improves validation of numeric bounds for some strategies. - :func:`~hypothesis.strategies.integers` and :func:`~hypothesis.strategies.floats` now raise ``InvalidArgument`` if passed a ``min_value`` or ``max_value`` which is not an instance of :class:`~python:numbers.Real`, instead of various internal errors. - :func:`~hypothesis.strategies.floats` now converts its bounding values to the nearest float above or below the min or max bound respectively, instead of just casting to float. The old behaviour was incorrect in that you could generate ``float(min_value)``, even when this was less than ``min_value`` itself (possible with eg. fractions). - When both bounds are provided to :func:`~hypothesis.strategies.floats` but there are no floats in the interval, such as ``[(2**54)+1 .. (2**55)-1]``, InvalidArgument is raised. - :func:`~hypothesis.strategies.decimals` gives a more useful error message if passed a string that cannot be converted to :class:`~python:decimal.Decimal` in a context where this error is not trapped. 
Code that previously **seemed** to work may be explicitly broken if there were no floats between ``min_value`` and ``max_value`` (only possible with non-float bounds), or if a bound was not a :class:`~python:numbers.Real` number but still allowed in :obj:`python:math.isnan` (some custom classes with a ``__float__`` method). .. _v3.49.1: ------------------- 3.49.1 - 2018-03-15 ------------------- This patch fixes our tests for Numpy dtype strategies on big-endian platforms, where the strategy behaved correctly but the test assumed that the native byte order was little-endian. There is no user impact unless you are running our test suite on big-endian platforms. Thanks to Graham Inggs for reporting :issue:`1164`. .. _v3.49.0: ------------------- 3.49.0 - 2018-03-12 ------------------- This release deprecates passing ``elements=None`` to collection strategies, such as :func:`~hypothesis.strategies.lists`. Requiring ``lists(nothing())`` or ``builds(list)`` instead of ``lists()`` means slightly more typing, but also improves the consistency and discoverability of our API - as well as showing how to compose or construct strategies in ways that still work in more complex situations. Passing a nonzero max_size to a collection strategy where the elements strategy contains no values is now deprecated, and will be an error in a future version. The equivalent with ``elements=None`` is already an error. .. _v3.48.1: ------------------- 3.48.1 - 2018-03-05 ------------------- This patch will minimize examples that would come out non-minimal in previous versions. Thanks to Kyle Reeve for this patch. .. _v3.48.0: ------------------- 3.48.0 - 2018-03-05 ------------------- This release improves some "unhappy paths" when using Hypothesis with the standard library :mod:`python:unittest` module: - Applying :func:`@given ` to a non-test method which is overridden from :class:`python:unittest.TestCase`, such as ``setUp``, raises :attr:`a new health check `. 
(:issue:`991`) - Using :meth:`~python:unittest.TestCase.subTest` within a test decorated with :func:`@given ` would leak intermediate results when tests were run under the :mod:`python:unittest` test runner. Individual reporting of failing subtests is now disabled during a test using :func:`@given `. (:issue:`1071`) - :func:`@given ` is still not a class decorator, but the error message if you try using it on a class has been improved. As a related improvement, using :class:`django:django.test.TestCase` with :func:`@given ` instead of :class:`hypothesis.extra.django.TestCase` raises an explicit error instead of running all examples in a single database transaction. .. _v3.47.0: ------------------- 3.47.0 - 2018-03-02 ------------------- :obj:`~hypothesis.settings.register_profile` now accepts keyword arguments for specific settings, and the parent settings object is now optional. Using a ``name`` for a registered profile which is not a string was never suggested, but it is now also deprecated and will eventually be an error. .. _v3.46.2: ------------------- 3.46.2 - 2018-03-01 ------------------- This release removes an unnecessary branch from the code, and has no user-visible impact. .. _v3.46.1: ------------------- 3.46.1 - 2018-03-01 ------------------- This changes only the formatting of our docstrings and should have no user-visible effects. .. _v3.46.0: ------------------- 3.46.0 - 2018-02-26 ------------------- :func:`~hypothesis.strategies.characters` has improved docs about what arguments are valid, and additional validation logic to raise a clear error early (instead of e.g. silently ignoring a bad argument). Categories may be specified as the Unicode 'general category' (eg ``u'Nd'``), or as the 'major category' (eg ``[u'N', u'Lu']`` is equivalent to ``[u'Nd', u'Nl', u'No', u'Lu']``). In previous versions, general categories were supported and all other input was silently ignored. 
Now, major categories are supported in addition to general categories (which may change the behaviour of some existing code), and all other input is deprecated. .. _v3.45.5: ------------------- 3.45.5 - 2018-02-26 ------------------- This patch improves strategy inference in ``hypothesis.extra.django`` to account for some validators in addition to field type - see :issue:`1116` for ongoing work in this space. Specifically, if a :class:`~django:django.db.models.CharField` or :class:`~django:django.db.models.TextField` has an attached :class:`~django:django.core.validators.RegexValidator`, we now use :func:`~hypothesis.strategies.from_regex` instead of :func:`~hypothesis.strategies.text` as the underlying strategy. This allows us to generate examples of the default :class:`~django:django.contrib.auth.models.User` model, closing :issue:`1112`. .. _v3.45.4: ------------------- 3.45.4 - 2018-02-25 ------------------- This patch improves some internal debugging information, fixes a typo in a validation error message, and expands the documentation for new contributors. .. _v3.45.3: ------------------- 3.45.3 - 2018-02-23 ------------------- This patch may improve example shrinking slightly for some strategies. .. _v3.45.2: ------------------- 3.45.2 - 2018-02-18 ------------------- This release makes our docstring style more consistent, thanks to :pypi:`flake8-docstrings`. There are no user-visible changes. .. _v3.45.1: ------------------- 3.45.1 - 2018-02-17 ------------------- This fixes an indentation issue in docstrings for :func:`~hypothesis.strategies.datetimes`, :func:`~hypothesis.strategies.dates`, :func:`~hypothesis.strategies.times`, and :func:`~hypothesis.strategies.timedeltas`. .. _v3.45.0: ------------------- 3.45.0 - 2018-02-13 ------------------- This release fixes :func:`~hypothesis.strategies.builds` so that ``target`` can be used as a keyword argument for passing values to the target. 
The target itself can still be specified as a keyword argument, but that behavior is now deprecated. The target should be provided as the first positional argument. .. _v3.44.26: -------------------- 3.44.26 - 2018-02-06 -------------------- This release fixes some formatting issues in the Hypothesis source code. It should have no externally visible effects. .. _v3.44.25: -------------------- 3.44.25 - 2018-02-05 -------------------- This release changes the way in which Hypothesis tries to shrink the size of examples. It probably won't have much impact, but might make shrinking faster in some cases. It is unlikely but not impossible that it will change the resulting examples. .. _v3.44.24: -------------------- 3.44.24 - 2018-01-27 -------------------- This release fixes dependency information when installing Hypothesis from a binary "wheel" distribution. - The ``install_requires`` for :pypi:`enum34` is resolved at install time, rather than at build time (with potentially different results). - Django has fixed their ``python_requires`` for versions 2.0.0 onward, simplifying Python2-compatible constraints for downstream projects. .. _v3.44.23: -------------------- 3.44.23 - 2018-01-24 -------------------- This release improves shrinking in a class of pathological examples that you are probably never hitting in practice. If you *are* hitting them in practice this should be a significant speed up in shrinking. If you are not, you are very unlikely to notice any difference. You might see a slight slow down and/or slightly better falsifying examples. .. _v3.44.22: -------------------- 3.44.22 - 2018-01-23 -------------------- This release fixes a dependency problem. It was possible to install Hypothesis with an old version of :pypi:`attrs`, which would throw a ``TypeError`` as soon as you tried to import hypothesis. Specifically, you need attrs 16.0.0 or newer. Hypothesis will now require the correct version of attrs when installing. .. 
_v3.44.21: -------------------- 3.44.21 - 2018-01-22 -------------------- This change adds some additional structural information that Hypothesis will use to guide its search. You mostly shouldn't see much difference from this. The two most likely effects you would notice are: 1. Hypothesis stores slightly more examples in its database for passing tests. 2. Hypothesis *may* find new bugs that it was previously missing, but it probably won't (this is a basic implementation of the feature that is intended to support future work. Although it is useful on its own, it's not *very* useful on its own). .. _v3.44.20: -------------------- 3.44.20 - 2018-01-21 -------------------- This is a small refactoring release that changes how Hypothesis tracks some information about the boundary of examples in its internal representation. You are unlikely to see much difference in behaviour, but memory usage and run time may both go down slightly during normal test execution, and when failing Hypothesis might print its failing example slightly sooner. .. _v3.44.19: -------------------- 3.44.19 - 2018-01-21 -------------------- This changes how we compute the default ``average_size`` for all collection strategies. Previously setting a ``max_size`` without setting an ``average_size`` would have the seemingly paradoxical effect of making data generation *slower*, because it would raise the average size from its default. Now setting ``max_size`` will either leave the default unchanged or lower it from its default. If you are currently experiencing this problem, this may make your tests substantially faster. If you are not, this will likely have no effect on you. .. _v3.44.18: -------------------- 3.44.18 - 2018-01-20 -------------------- This is a small refactoring release that changes how Hypothesis detects when the structure of data generation depends on earlier values generated (e.g. when using :ref:`flatmap ` or :func:`~hypothesis.strategies.composite`). 
It should not have any observable effect on behaviour. .. _v3.44.17: -------------------- 3.44.17 - 2018-01-15 -------------------- This release fixes a typo in internal documentation, and has no user-visible impact. .. _v3.44.16: -------------------- 3.44.16 - 2018-01-13 -------------------- This release improves test case reduction for recursive data structures. Hypothesis now guarantees that whenever a strategy calls itself recursively (usually this will happen because you are using :func:`~hypothesis.strategies.deferred`), any recursive call may replace the top level value. e.g. given a tree structure, Hypothesis will always try replacing it with a subtree. Additionally this introduces a new heuristic that may in some circumstances significantly speed up test case reduction - Hypothesis should be better at immediately replacing elements drawn inside another strategy with their minimal possible value. .. _v3.44.15: -------------------- 3.44.15 - 2018-01-13 -------------------- :func:`~hypothesis.strategies.from_type` can now resolve recursive types such as binary trees (:issue:`1004`). Detection of non-type arguments has also improved, leading to better error messages in many cases involving :pep:`forward references <484#forward-references>`. .. _v3.44.14: -------------------- 3.44.14 - 2018-01-08 -------------------- This release fixes a bug in the shrinker that prevented the optimisations in :ref:`3.44.6 ` from working in some cases. It would not have worked correctly when filtered examples were nested (e.g. with a set of integers in some range). This would not have resulted in any correctness problems, but shrinking may have been slower than it otherwise could be. .. _v3.44.13: -------------------- 3.44.13 - 2018-01-08 -------------------- This release changes the average bit length of values drawn from :func:`~hypothesis.strategies.integers` to be much smaller. Additionally it changes the shrinking order so that now size is considered before sign - e.g. 
-1 will be preferred to +10. The new internal format for integers required some changes to the minimizer to make work well, so you may also see some improvements to example quality in unrelated areas. .. _v3.44.12: -------------------- 3.44.12 - 2018-01-07 -------------------- This changes Hypothesis's internal implementation of weighted sampling. This will affect example distribution and quality, but you shouldn't see any other effects. .. _v3.44.11: -------------------- 3.44.11 - 2018-01-06 -------------------- This is a change to some internals around how Hypothesis handles avoiding generating duplicate examples and seeking out novel regions of the search space. You are unlikely to see much difference as a result of it, but it fixes a bug where an internal assertion could theoretically be triggered and has some minor effects on the distribution of examples so could potentially find bugs that have previously been missed. .. _v3.44.10: -------------------- 3.44.10 - 2018-01-06 -------------------- This patch avoids creating debug statements when debugging is disabled. Profiling suggests this is a 5-10% performance improvement (:pull:`1040`). .. _v3.44.9: ------------------- 3.44.9 - 2018-01-06 ------------------- This patch blacklists null characters (``'\x00'``) in automatically created strategies for Django :obj:`~django:django.db.models.CharField` and :obj:`~django:django.db.models.TextField`, due to a database issue which `was recently fixed upstream `_ (Hypothesis :issue:`1045`). .. _v3.44.8: ------------------- 3.44.8 - 2018-01-06 ------------------- This release makes the Hypothesis shrinker slightly less greedy in order to avoid local minima - when it gets stuck, it makes a small attempt to search around the final example it would previously have returned to find a new starting point to shrink from. 
This should improve example quality in some cases, especially ones where the test data has dependencies among parts of it that make it difficult for Hypothesis to proceed. .. _v3.44.7: ------------------- 3.44.7 - 2018-01-04 ------------------- This release adds support for `Django 2 `_ in the hypothesis-django extra. This release drops support for Django 1.10, as it is no longer supported by the Django team. .. _v3.44.6: ------------------- 3.44.6 - 2018-01-02 ------------------- This release speeds up test case reduction in many examples by being better at detecting large shrinks it can use to discard redundant parts of its input. This will be particularly noticeable in examples that make use of filtering and for some integer ranges. .. _v3.44.5: ------------------- 3.44.5 - 2018-01-02 ------------------- Happy new year! This is a no-op release that updates the year range on all of the copyright headers in our source to include 2018. .. _v3.44.4: ------------------- 3.44.4 - 2017-12-23 ------------------- This release fixes :issue:`1044`, which slowed tests by up to 6% due to broken caching. .. _v3.44.3: ------------------- 3.44.3 - 2017-12-21 ------------------- This release improves the shrinker in cases where examples drawn earlier can affect how much data is drawn later (e.g. when you draw a length parameter in a composite and then draw that many elements). Examples found in cases like this should now be much closer to minimal. .. _v3.44.2: ------------------- 3.44.2 - 2017-12-20 ------------------- This is a pure refactoring release which changes how Hypothesis manages its set of examples internally. It should have no externally visible effects. .. 
_v3.44.1: ------------------- 3.44.1 - 2017-12-18 ------------------- This release fixes :issue:`997`, in which under some circumstances the body of tests run under Hypothesis would not show up when run under coverage even though the tests were run and the code they called outside of the test file would show up normally. .. _v3.44.0: ------------------- 3.44.0 - 2017-12-17 ------------------- This release adds a new feature: The :func:`@reproduce_failure ` decorator, designed to make it easy to use Hypothesis's binary format for examples to reproduce a problem locally without having to share your example database between machines. This also changes when seeds are printed: * They will no longer be printed for normal falsifying examples, as there are now adequate ways of reproducing those for all cases, so it just contributes noise. * They will once again be printed when reusing examples from the database, as health check failures should now be more reliable in this scenario so it will almost always work in this case. This work was funded by `Smarkets `_. .. _v3.43.1: ------------------- 3.43.1 - 2017-12-17 ------------------- This release fixes a bug with Hypothesis's database management - examples that were found in the course of shrinking were saved in a way that indicated that they had distinct causes, and so they would all be retried on the start of the next test. The intended behaviour, which is now what is implemented, is that only a bounded subset of these examples would be retried. .. _v3.43.0: ------------------- 3.43.0 - 2017-12-17 ------------------- :exc:`~hypothesis.errors.HypothesisDeprecationWarning` now inherits from :exc:`python:FutureWarning` instead of :exc:`python:DeprecationWarning`, as recommended by :pep:`565` for user-facing warnings (:issue:`618`). If you have not changed the default warnings settings, you will now see each distinct :exc:`~hypothesis.errors.HypothesisDeprecationWarning` instead of only the first. .. 
_v3.42.2: ------------------- 3.42.2 - 2017-12-12 ------------------- This patch fixes :issue:`1017`, where instances of a list or tuple subtype used as an argument to a strategy would be coerced to tuple. .. _v3.42.1: ------------------- 3.42.1 - 2017-12-10 ------------------- This release has some internal cleanup, which makes reading the code more pleasant and may shrink large examples slightly faster. .. _v3.42.0: ------------------- 3.42.0 - 2017-12-09 ------------------- This release deprecates ``faker-extra``, which was designed as a transition strategy but does not support example shrinking or coverage-guided discovery. .. _v3.41.0: ------------------- 3.41.0 - 2017-12-06 ------------------- :func:`~hypothesis.strategies.sampled_from` can now sample from one-dimensional numpy ndarrays. Sampling from multi-dimensional ndarrays still results in a deprecation warning. Thanks to Charlie Tanksley for this patch. .. _v3.40.1: ------------------- 3.40.1 - 2017-12-04 ------------------- This release makes two changes: * It makes the calculation of some of the metadata that Hypothesis uses for shrinking occur lazily. This should speed up performance of test case generation a bit because it no longer calculates information it doesn't need. * It improves the shrinker for certain classes of nested examples. e.g. when shrinking lists of lists, the shrinker is now able to concatenate two adjacent lists together into a single list. As a result of this change, shrinking may get somewhat slower when the minimal example found is large. .. _v3.40.0: ------------------- 3.40.0 - 2017-12-02 ------------------- This release improves how various ways of seeding Hypothesis interact with the example database: * Using the example database with :func:`~hypothesis.seed` is now deprecated. You should set ``database=None`` if you are doing that. This will only warn if you actually load examples from the database while using ``@seed``. 
* The :attr:`~hypothesis.settings.derandomize` will behave the same way as ``@seed``. * Using ``--hypothesis-seed`` will disable use of the database. * If a test used examples from the database, it will not suggest using a seed to reproduce it, because that won't work. This work was funded by `Smarkets `_. .. _v3.39.0: ------------------- 3.39.0 - 2017-12-01 ------------------- This release adds a new health check that checks if the smallest "natural" possible example of your test case is very large - this will tend to cause Hypothesis to generate bad examples and be quite slow. This work was funded by `Smarkets `_. .. _v3.38.9: ------------------- 3.38.9 - 2017-11-29 ------------------- This is a documentation release to improve the documentation of shrinking behaviour for Hypothesis's strategies. .. _v3.38.8: ------------------- 3.38.8 - 2017-11-29 ------------------- This release improves the performance of :func:`~hypothesis.strategies.characters` when using ``blacklist_characters`` and :func:`~hypothesis.strategies.from_regex` when using negative character classes. The problems this fixes were found in the course of work funded by `Smarkets `_. .. _v3.38.7: ------------------- 3.38.7 - 2017-11-29 ------------------- This is a patch release for :func:`~hypothesis.strategies.from_regex`, which had a bug in handling of the :obj:`python:re.VERBOSE` flag (:issue:`992`). Flags are now handled correctly when parsing regex. .. _v3.38.6: ------------------- 3.38.6 - 2017-11-28 ------------------- This patch changes a few byte-string literals from double to single quotes, thanks to an update in :pypi:`unify`. There are no user-visible changes. .. _v3.38.5: ------------------- 3.38.5 - 2017-11-23 ------------------- This fixes the repr of strategies using lambda that are defined inside decorators to include the lambda source. This would mostly have been visible when using the :ref:`statistics ` functionality - lambdas used for e.g. 
filtering would have shown up with a ```` as their body. This can still happen, but it should happen less often now. .. _v3.38.4: ------------------- 3.38.4 - 2017-11-22 ------------------- This release updates the reported :ref:`statistics ` so that they show approximately what fraction of your test run time is spent in data generation (as opposed to test execution). This work was funded by `Smarkets `_. .. _v3.38.3: ------------------- 3.38.3 - 2017-11-21 ------------------- This is a documentation release, which ensures code examples are up to date by running them as doctests in CI (:issue:`711`). .. _v3.38.2: ------------------- 3.38.2 - 2017-11-21 ------------------- This release changes the behaviour of the :attr:`~hypothesis.settings.deadline` setting when used with :func:`~hypothesis.strategies.data`: Time spent inside calls to ``data.draw`` will no longer be counted towards the deadline time. As a side effect of some refactoring required for this work, the way flaky tests are handled has changed slightly. You are unlikely to see much difference from this, but some error messages will have changed. This work was funded by `Smarkets `_. .. _v3.38.1: ------------------- 3.38.1 - 2017-11-21 ------------------- This patch has a variety of non-user-visible refactorings, removing various minor warts ranging from indirect imports to typos in comments. .. _v3.38.0: ------------------- 3.38.0 - 2017-11-18 ------------------- This release overhauls :doc:`the health check system ` in a variety of small ways. It adds no new features, but is nevertheless a minor release because it changes which tests are likely to fail health checks. The most noticeable effect is that some tests that used to fail health checks will now pass, and some that used to pass will fail. These should all be improvements in accuracy. 
In particular: * New failures will usually be because they are now taking into account things like use of :func:`~hypothesis.strategies.data` and :func:`~hypothesis.assume` inside the test body. * New failures *may* also be because for some classes of example the way data generation performance was measured was artificially faster than real data generation (for most examples that are hitting performance health checks the opposite should be the case). * Tests that used to fail health checks and now pass do so because the health check system used to run in a way that was subtly different than the main Hypothesis data generation and lacked some of its support for e.g. large examples. If your data generation is especially slow, you may also see your tests get somewhat faster, as there is no longer a separate health check phase. This will be particularly noticeable when rerunning test failures. This work was funded by `Smarkets `_. .. _v3.37.0: ------------------- 3.37.0 - 2017-11-12 ------------------- This is a deprecation release for some health check related features. The following are now deprecated: * Passing ``HealthCheck.exception_in_generation`` to :attr:`~hypothesis.settings.suppress_health_check`. This no longer does anything even when passed - All errors that occur during data generation will now be immediately reraised rather than going through the health check mechanism. * Passing ``HealthCheck.random_module`` to :attr:`~hypothesis.settings.suppress_health_check`. This hasn't done anything for a long time, but was never explicitly deprecated. Hypothesis always seeds the random module when running :func:`@given ` tests, so this is no longer an error and suppressing it doesn't do anything. * Passing non-:class:`~hypothesis.HealthCheck` values in :attr:`~hypothesis.settings.suppress_health_check`. This was previously allowed but never did anything useful. 
In addition, passing a non-iterable value as :attr:`~hypothesis.settings.suppress_health_check` will now raise an error immediately (it would never have worked correctly, but it would previously have failed later). Some validation error messages have also been updated. This work was funded by `Smarkets `_. .. _v3.36.1: ------------------- 3.36.1 - 2017-11-10 ------------------- This is a yak shaving release, mostly concerned with our own tests. While :func:`~python:inspect.getfullargspec` was documented as deprecated in Python 3.5, it never actually emitted a warning. Our code to silence this (nonexistent) warning has therefore been removed. We now run our tests with ``DeprecationWarning`` as an error, and made some minor changes to our own tests as a result. This required similar upstream updates to :pypi:`coverage` and :pypi:`execnet` (a test-time dependency via :pypi:`pytest-xdist`). There is no user-visible change in Hypothesis itself, but we encourage you to consider enabling deprecations as errors in your own tests. .. _v3.36.0: ------------------- 3.36.0 - 2017-11-06 ------------------- This release adds a setting to the public API, and does some internal cleanup: - The :attr:`~hypothesis.settings.derandomize` setting is now documented (:issue:`890`) - Removed - and disallowed - all 'bare excepts' in Hypothesis (:issue:`953`) - Documented the ``strict`` setting as deprecated, and updated the build so our docs always match deprecations in the code. .. _v3.35.0: ------------------- 3.35.0 - 2017-11-06 ------------------- This minor release supports constraining :func:`~hypothesis.strategies.uuids` to generate a particular version of :class:`~python:uuid.UUID` (:issue:`721`). Thanks to Dion Misic for this feature. .. _v3.34.1: ------------------- 3.34.1 - 2017-11-02 ------------------- This patch updates the documentation to suggest :func:`builds(callable) ` instead of :func:`just(callable()) `. .. 
_v3.34.0: ------------------- 3.34.0 - 2017-11-02 ------------------- Hypothesis now emits deprecation warnings if you apply :func:`@given ` more than once to a target. Applying :func:`@given ` repeatedly wraps the target multiple times. Each wrapper will search the space of possible parameters separately. This is equivalent but will be much more inefficient than doing it with a single call to :func:`@given `. For example, instead of ``@given(booleans()) @given(integers())``, you could write ``@given(booleans(), integers())`` .. _v3.33.1: ------------------- 3.33.1 - 2017-11-02 ------------------- This is a bugfix release: - :func:`~hypothesis.strategies.builds` would try to infer a strategy for required positional arguments of the target from type hints, even if they had been given to :func:`~hypothesis.strategies.builds` as positional arguments (:issue:`946`). Now it only infers missing required arguments. - An internal introspection function wrongly reported ``self`` as a required argument for bound methods, which might also have affected :func:`~hypothesis.strategies.builds`. Now it knows better. .. _v3.33.0: ------------------- 3.33.0 - 2017-10-16 ------------------- This release supports strategy inference for more Django field types - you can now omit an argument for Date, Time, Duration, Slug, IP Address, and UUID fields. (:issue:`642`) Strategy generation for fields with grouped choices now selects choices from each group, instead of selecting from the group names. .. _v3.32.2: ------------------- 3.32.2 - 2017-10-15 ------------------- This patch removes the ``mergedb`` tool, introduced in Hypothesis 1.7.1 on an experimental basis. It has never actually worked, and the new :doc:`Hypothesis example database ` is designed to make such a tool unnecessary. .. _v3.32.1: ------------------- 3.32.1 - 2017-10-13 ------------------- This patch has two improvements for strategies based on enumerations.
- :func:`~hypothesis.strategies.from_type` now handles enumerations correctly, delegating to :func:`~hypothesis.strategies.sampled_from`. Previously it noted that ``Enum.__init__`` has no required arguments and therefore delegated to :func:`~hypothesis.strategies.builds`, which would subsequently fail. - When sampling from an :class:`python:enum.Flag`, we also generate combinations of members. Eg for ``Flag('Permissions', 'READ, WRITE, EXECUTE')`` we can now generate, ``Permissions.READ``, ``Permissions.READ|WRITE``, and so on. .. _v3.32.0: ------------------- 3.32.0 - 2017-10-09 ------------------- This changes the default value of the ``use_coverage`` setting to True when running on pypy (it was already True on CPython). It was previously set to False because we expected it to be too slow, but recent benchmarking shows that actually performance of the feature on pypy is fairly acceptable - sometimes it's slower than on CPython, sometimes it's faster, but it's generally within a factor of two either way. .. _v3.31.6: ------------------- 3.31.6 - 2017-10-08 ------------------- This patch improves the quality of strategies inferred from Numpy dtypes: * Integer dtypes generated examples with the upper half of their (non-sign) bits set to zero. The inferred strategies can now produce any representable integer. * Fixed-width unicode- and byte-string dtypes now cap the internal example length, which should improve example and shrink quality. * Numpy arrays can only store fixed-size strings internally, and allow shorter strings by right-padding them with null bytes. Inferred string strategies no longer generate such values, as they can never be retrieved from an array. This improves shrinking performance by skipping useless values. 
This has already been useful in Hypothesis - we found an overflow bug in our Pandas support, and as a result :func:`~hypothesis.extra.pandas.indexes` and :func:`~hypothesis.extra.pandas.range_indexes` now check that ``min_size`` and ``max_size`` are at least zero. .. _v3.31.5: ------------------- 3.31.5 - 2017-10-08 ------------------- This release fixes a performance problem in tests where the ``use_coverage`` setting is True. Tests experience a slow-down proportionate to the amount of code they cover. This is still the case, but the factor is now low enough that it should be unnoticeable. Previously it was large and became much larger in :ref:`3.30.4 `. .. _v3.31.4: ------------------- 3.31.4 - 2017-10-08 ------------------- :func:`~hypothesis.strategies.from_type` failed with a very confusing error if passed a :func:`~python:typing.NewType` (:issue:`901`). These pseudo-types are now unwrapped correctly, and strategy inference works as expected. .. _v3.31.3: ------------------- 3.31.3 - 2017-10-06 ------------------- This release makes some small optimisations to our use of coverage that should reduce constant per-example overhead. This is probably only noticeable on examples where the test itself is quite fast. On no-op tests that don't test anything you may see up to a fourfold speed increase (which is still significantly slower than without coverage). On more realistic tests the speed up is likely to be less than that. .. _v3.31.2: ------------------- 3.31.2 - 2017-09-30 ------------------- This release fixes some formatting and small typos/grammar issues in the documentation, specifically the page docs/settings.rst, and the inline docs for the various settings. .. _v3.31.1: ------------------- 3.31.1 - 2017-09-30 ------------------- This release improves the handling of deadlines so that they act better with the shrinking process. This fixes :issue:`892`. This involves two changes: 1.
The deadline is raised during the initial generation and shrinking, and then lowered to the set value for final replay. This restricts our attention to examples which exceed the deadline by a more significant margin, which increases their reliability. 2. When despite the above a test still becomes flaky because it is significantly faster on rerun than it was on its first run, the error message is now more explicit about the nature of this problem, and includes both the initial test run time and the new test run time. In addition, this release also clarifies the documentation of the deadline setting slightly to be more explicit about where it applies. This work was funded by `Smarkets `_. .. _v3.31.0: ------------------- 3.31.0 - 2017-09-29 ------------------- This release blocks installation of Hypothesis on Python 3.3, which :PEP:`reached its end of life date on 2017-09-29 <398>`. This should not be of interest to anyone but downstream maintainers - if you are affected, migrate to a secure version of Python as soon as possible or at least seek commercial support. .. _v3.30.4: ------------------- 3.30.4 - 2017-09-27 ------------------- This release makes several changes: 1. It significantly improves Hypothesis's ability to use coverage information to find interesting examples. 2. It reduces the default :attr:`~hypothesis.settings.max_examples` setting from 200 to 100. This takes advantage of the improved algorithm meaning fewer examples are typically needed to get the same testing and is sufficiently better at covering interesting behaviour, and offsets some of the performance problems of running under coverage. 3. Hypothesis will always try to start its testing with an example that is near minimized. The new algorithm for 1 also makes some changes to Hypothesis's low level data generation which apply even with coverage turned off. They generally reduce the total amount of data generated, which should improve test performance somewhat. 
Between this and 3 you should see a noticeable reduction in test runtime (how much so depends on your tests and how much example size affects their performance. On our benchmarks, where data generation dominates, we saw up to a factor of two performance improvement, but it's unlikely to be that large. .. _v3.30.3: ------------------- 3.30.3 - 2017-09-25 ------------------- This release fixes some formatting and small typos/grammar issues in the documentation, specifically the page docs/details.rst, and some inline docs linked from there. .. _v3.30.2: ------------------- 3.30.2 - 2017-09-24 ------------------- This release changes Hypothesis's caching approach for functions in ``hypothesis.strategies``. Previously it would have cached extremely aggressively and cache entries would never be evicted. Now it adopts a least-frequently used, least recently used key invalidation policy, and is somewhat more conservative about which strategies it caches. Workloads which create strategies based on dynamic values, e.g. by using :ref:`flatmap ` or :func:`~hypothesis.strategies.composite`, will use significantly less memory. .. _v3.30.1: ------------------- 3.30.1 - 2017-09-22 ------------------- This release fixes a bug where when running with the ``use_coverage=True`` setting inside an existing running instance of coverage, Hypothesis would frequently put files that the coveragerc excluded in the report for the enclosing coverage. .. _v3.30.0: ------------------- 3.30.0 - 2017-09-20 ------------------- This release introduces two new features: * When a test fails, either with a health check failure or a falsifying example, Hypothesis will print out a seed that led to that failure, if the test is not already running with a fixed seed. You can then recreate that failure using either the :func:`@seed ` decorator or (if you are running pytest) with ``--hypothesis-seed``. 
* :pypi:`pytest` users can specify a seed to use for :func:`@given ` based tests by passing the ``--hypothesis-seed`` command line argument. This work was funded by `Smarkets `_. .. _v3.29.0: ------------------- 3.29.0 - 2017-09-19 ------------------- This release makes Hypothesis coverage aware. Hypothesis now runs all test bodies under coverage, and uses this information to guide its testing. The ``use_coverage`` setting can be used to disable this behaviour if you want to test code that is sensitive to coverage being enabled (either because of performance or interaction with the trace function). The main benefits of this feature are: * Hypothesis now observes when examples it discovers cover particular lines or branches and stores them in the database for later. * Hypothesis will make some use of this information to guide its exploration of the search space and improve the examples it finds (this is currently used only very lightly and will likely improve significantly in future releases). This also has the following side-effects: * Hypothesis now has an install time dependency on the :pypi:`coverage` package. * Tests that are already running Hypothesis under coverage will likely get faster. * Tests that are not running under coverage now run their test bodies under coverage by default. This feature is only partially supported under pypy. It is significantly slower than on CPython and is turned off by default as a result, but it should still work correctly if you want to use it. .. _v3.28.3: ------------------- 3.28.3 - 2017-09-18 ------------------- This release is an internal change that affects how Hypothesis handles calculating certain properties of strategies. The primary effect of this is that it fixes a bug where use of :func:`~hypothesis.strategies.deferred` could sometimes trigger an internal assertion error. 
However the fix for this bug involved some moderately deep changes to how Hypothesis handles certain constructs so you may notice some additional knock-on effects. In particular the way Hypothesis handles drawing data from strategies that cannot generate any values has changed to bail out sooner than it previously did. This may speed up certain tests, but it is unlikely to make much of a difference in practice for tests that were not already failing with Unsatisfiable. .. _v3.28.2: ------------------- 3.28.2 - 2017-09-18 ------------------- This is a patch release that fixes a bug in the :mod:`hypothesis.extra.pandas` documentation where it incorrectly referred to :func:`~hypothesis.extra.pandas.column` instead of :func:`~hypothesis.extra.pandas.columns`. .. _v3.28.1: ------------------- 3.28.1 - 2017-09-16 ------------------- This is a refactoring release. It moves a number of internal uses of :func:`~python:collections.namedtuple` over to using attrs based classes, and removes a couple of internal namedtuple classes that were no longer in use. It should have no user visible impact. .. _v3.28.0: ------------------- 3.28.0 - 2017-09-15 ------------------- This release adds support for testing :pypi:`pandas` via the :ref:`hypothesis.extra.pandas ` module. It also adds a dependency on :pypi:`attrs`. This work was funded by `Stripe `_. .. _v3.27.1: ------------------- 3.27.1 - 2017-09-14 ------------------- This release fixes some formatting and broken cross-references in the documentation, which includes editing docstrings - and thus a patch release. .. _v3.27.0: ------------------- 3.27.0 - 2017-09-13 ------------------- This release introduces a :attr:`~hypothesis.settings.deadline` setting to Hypothesis. When set this turns slow tests into errors. By default it is unset but will warn if you exceed 200ms, which will become the default value in a future release. This work was funded by `Smarkets `_. .. 
_v3.26.0: ------------------- 3.26.0 - 2017-09-12 ------------------- Hypothesis now emits deprecation warnings if you are using the legacy SQLite example database format, or the tool for merging them. These were already documented as deprecated, so this doesn't change their deprecation status, only that we warn about it. .. _v3.25.1: ------------------- 3.25.1 - 2017-09-12 ------------------- This release fixes a bug with generating :doc:`numpy datetime and timedelta types `: When inferring the strategy from the dtype, datetime and timedelta dtypes with sub-second precision would always produce examples with one second resolution. Inferring a strategy from a time dtype will now always produce examples with the same precision. .. _v3.25.0: ------------------- 3.25.0 - 2017-09-12 ------------------- This release changes how Hypothesis shrinks and replays examples to take into account that it can encounter new bugs while shrinking the bug it originally found. Previously it would end up replacing the originally found bug with the new bug and show you only that one. Now it is (often) able to recognise when two bugs are distinct and when it finds more than one will show both. .. _v3.24.2: ------------------- 3.24.2 - 2017-09-11 ------------------- This release removes the (purely internal and no longer useful) ``strategy_test_suite`` function and the corresponding strategytests module. .. _v3.24.1: ------------------- 3.24.1 - 2017-09-06 ------------------- This release improves the reduction of examples involving floating point numbers to produce more human readable examples. It also has some general purpose changes to the way the minimizer works internally, which may see some improvement in quality and slow down of test case reduction in cases that have nothing to do with floating point numbers. ..
_v3.24.0: ------------------- 3.24.0 - 2017-09-05 ------------------- Hypothesis now emits deprecation warnings if you use ``some_strategy.example()`` inside a test function or strategy definition (this was never intended to be supported, but is sufficiently widespread that it warrants a deprecation path). .. _v3.23.3: ------------------- 3.23.3 - 2017-09-05 ------------------- This is a bugfix release for :func:`~hypothesis.strategies.decimals` with the ``places`` argument. - No longer fails health checks (:issue:`725`, due to internal filtering) - Specifying a ``min_value`` and ``max_value`` without any decimals with ``places`` places between them gives a more useful error message. - Works for any valid arguments, regardless of the decimal precision context. .. _v3.23.2: ------------------- 3.23.2 - 2017-09-01 ------------------- This is a small refactoring release that removes a now-unused parameter to an internal API. It shouldn't have any user visible effect. .. _v3.23.1: ------------------- 3.23.1 - 2017-09-01 ------------------- Hypothesis no longer propagates the dynamic scope of settings into strategy definitions. This release is a small change to something that was never part of the public API and you will almost certainly not notice any effect unless you're doing something surprising, but for example the following code will now give a different answer in some circumstances: .. code-block:: python import hypothesis.strategies as st from hypothesis import settings CURRENT_SETTINGS = st.builds(lambda: settings.default) (We don't actually encourage you writing code like this) Previously this would have generated the settings that were in effect at the point of definition of ``CURRENT_SETTINGS``. Now it will generate the settings that are used for the current test. It is very unlikely to be significant enough to be visible, but you may also notice a small performance improvement. .. 
_v3.23.0: ------------------- 3.23.0 - 2017-08-31 ------------------- This release adds a ``unique`` argument to :func:`~hypothesis.extra.numpy.arrays` which behaves the same way as the corresponding one for :func:`~hypothesis.strategies.lists`, requiring all of the elements in the generated array to be distinct. .. _v3.22.2: ------------------- 3.22.2 - 2017-08-29 ------------------- This release fixes an issue where Hypothesis would raise a ``TypeError`` when using the datetime-related strategies if running with ``PYTHONOPTIMIZE=2``. This bug was introduced in :ref:`3.20.0 `. (See :issue:`822`) .. _v3.22.1: ------------------- 3.22.1 - 2017-08-28 ------------------- Hypothesis now transparently handles problems with an internal unicode cache, including file truncation or read-only filesystems (:issue:`767`). Thanks to Sam Hames for the patch. .. _v3.22.0: ------------------- 3.22.0 - 2017-08-26 ------------------- This release provides what should be a substantial performance improvement to numpy arrays generated using :ref:`provided numpy support `, and adds a new ``fill_value`` argument to :func:`~hypothesis.extra.numpy.arrays` to control this behaviour. This work was funded by `Stripe `_. .. _v3.21.3: ------------------- 3.21.3 - 2017-08-26 ------------------- This release fixes some extremely specific circumstances that probably have never occurred in the wild where users of :func:`~hypothesis.strategies.deferred` might have seen a :class:`python:RuntimeError` from too much recursion, usually in cases where no valid example could have been generated anyway. .. _v3.21.2: ------------------- 3.21.2 - 2017-08-25 ------------------- This release fixes some minor bugs in argument validation: * :ref:`hypothesis.extra.numpy ` dtype strategies would raise an internal error instead of an InvalidArgument exception when passed an invalid endianness specification.
* :func:`~hypothesis.strategies.fractions` would raise an internal error instead of an InvalidArgument if passed ``float("nan")`` as one of its bounds. * The error message for passing ``float("nan")`` as a bound to various strategies has been improved. * Various bound arguments will now raise InvalidArgument in cases where they would previously have raised an internal TypeError or ValueError from the relevant conversion function. * ``streaming()`` would not have emitted a deprecation warning when called with an invalid argument. .. _v3.21.1: ------------------- 3.21.1 - 2017-08-24 ------------------- This release fixes a bug where test failures that were the result of an :func:`@example ` would print an extra stack trace before re-raising the exception. .. _v3.21.0: ------------------- 3.21.0 - 2017-08-23 ------------------- This release deprecates Hypothesis's strict mode, which turned Hypothesis's deprecation warnings into errors. Similar functionality can be achieved by using :func:`simplefilter('error', HypothesisDeprecationWarning) `. .. _v3.20.0: ------------------- 3.20.0 - 2017-08-22 ------------------- This release renames the relevant arguments on the :func:`~hypothesis.strategies.datetimes`, :func:`~hypothesis.strategies.dates`, :func:`~hypothesis.strategies.times`, and :func:`~hypothesis.strategies.timedeltas` strategies to ``min_value`` and ``max_value``, to make them consistent with the other strategies in the module. The old argument names are still supported but will emit a deprecation warning when used explicitly as keyword arguments. Arguments passed positionally will go to the new argument names and are not deprecated. .. _v3.19.3: ------------------- 3.19.3 - 2017-08-22 ------------------- This release provides a major overhaul to the internals of how Hypothesis handles shrinking. 
This should mostly be visible in terms of getting better examples for tests which make heavy use of :func:`~hypothesis.strategies.composite`, :func:`~hypothesis.strategies.data` or :ref:`flatmap ` where the data drawn depends a lot on previous choices, especially where size parameters are affected. Previously Hypothesis would have struggled to reliably produce good examples here. Now it should do much better. Performance should also be better for examples with a non-zero ``min_size``. You may see slight changes to example generation (e.g. improved example diversity) as a result of related changes to internals, but they are unlikely to be significant enough to notice. .. _v3.19.2: ------------------- 3.19.2 - 2017-08-21 ------------------- This release fixes two bugs in :mod:`hypothesis.extra.numpy`: * :func:`~hypothesis.extra.numpy.unicode_string_dtypes` didn't work at all due to an incorrect dtype specifier. Now it does. * Various impossible conditions would have been accepted but would error when they fail to produce any example. Now they raise an explicit InvalidArgument error. .. _v3.19.1: ------------------- 3.19.1 - 2017-08-21 ------------------- This is a bugfix release for :issue:`739`, where bounds for :func:`~hypothesis.strategies.fractions` or floating-point :func:`~hypothesis.strategies.decimals` were not properly converted to integers before passing them to the integers strategy. This excluded some values that should have been possible, and could trigger internal errors if the bounds lay between adjacent integers. You can now bound :func:`~hypothesis.strategies.fractions` with two arbitrarily close fractions. It is now an explicit error to supply a min_value, max_value, and max_denominator to :func:`~hypothesis.strategies.fractions` where the value bounds do not include a fraction with denominator at most max_denominator. ..
_v3.19.0: ------------------- 3.19.0 - 2017-08-20 ------------------- This release adds the :func:`~hypothesis.strategies.from_regex` strategy, which generates strings that contain a match of a regular expression. Thanks to Maxim Kulkin for creating the `hypothesis-regex `_ package and then helping to upstream it! (:issue:`662`) .. _v3.18.5: ------------------- 3.18.5 - 2017-08-18 ------------------- This is a bugfix release for :func:`~hypothesis.strategies.integers`. Previously the strategy would hit an internal assertion if passed non-integer bounds for ``min_value`` and ``max_value`` that had no integers between them. The strategy now raises InvalidArgument instead. .. _v3.18.4: ------------------- 3.18.4 - 2017-08-18 ------------------- Release to fix a bug where mocks can be used as test runners under certain conditions. Specifically, if a mock is injected into a test via pytest fixtures or patch decorators, and that mock is the first argument in the list, hypothesis will think it represents self and turns the mock into a test runner. If this happens, the affected test always passes because the mock is executed instead of the test body. Sometimes, it will also fail health checks. Fixes :issue:`491` and a section of :issue:`198`. Thanks to Ben Peterson for this bug fix. .. _v3.18.3: ------------------- 3.18.3 - 2017-08-17 ------------------- This release should improve the performance of some tests which experienced a slow down as a result of the :ref:`3.13.0 ` release. Tests most likely to benefit from this are ones that make extensive use of ``min_size`` parameters, but others may see some improvement as well. .. _v3.18.2: ------------------- 3.18.2 - 2017-08-16 ------------------- This release fixes a bug introduced in :ref:`3.18.0 `. If the arguments ``whitelist_characters`` and ``blacklist_characters`` to :func:`~hypothesis.strategies.characters` contained overlapping elements, then an ``InvalidArgument`` exception would be raised. 
Thanks to Zac Hatfield-Dodds for reporting and fixing this. .. _v3.18.1: ------------------- 3.18.1 - 2017-08-14 ------------------- This is a bug fix release to fix :issue:`780`, where :func:`~hypothesis.strategies.sets` and similar would trigger health check errors if their element strategy could only produce one element (e.g. if it was :func:`~hypothesis.strategies.just`). .. _v3.18.0: ------------------- 3.18.0 - 2017-08-13 ------------------- This is a feature release: * :func:`~hypothesis.strategies.characters` now accepts ``whitelist_characters``, particular characters which will be added to those it produces. (:issue:`668`) * A bug fix for the internal function ``_union_interval_lists()``, and a rename to ``_union_intervals()``. It now correctly handles all cases where intervals overlap, and it always returns the result as a tuple for tuples. Thanks to Alex Willmer for these. .. _v3.17.0: ------------------- 3.17.0 - 2017-08-07 ------------------- This release documents :ref:`the previously undocumented phases feature `, making it part of the public API. It also updates how the example database is used. Principally: * A ``Phases.reuse`` argument will now correctly control whether examples from the database are run (it previously did exactly the wrong thing and controlled whether examples would be *saved*). * Hypothesis will no longer try to rerun *all* previously failing examples. Instead it will replay the smallest previously failing example and a selection of other examples that are likely to trigger any other bugs that it has found. This prevents a previous failure from dominating your tests unnecessarily. * As a result of the previous change, Hypothesis will be slower about clearing out old examples from the database that are no longer failing (because it can only clear out ones that it actually runs). ..
_v3.16.1: ------------------- 3.16.1 - 2017-08-07 ------------------- This release makes an implementation change to how Hypothesis handles certain internal constructs. The main effect you should see is improvement to the behaviour and performance of collection types, especially ones with a ``min_size`` parameter. Many cases that would previously fail due to being unable to generate enough valid examples will now succeed, and other cases should run slightly faster. .. _v3.16.0: ------------------- 3.16.0 - 2017-08-04 ------------------- This release introduces a deprecation of the timeout feature. This results in the following changes: * Creating a settings object with an explicit timeout will emit a deprecation warning. * If your test stops because it hits the timeout (and has not found a bug) then it will emit a deprecation warning. * There is a new value ``unlimited`` which you can import from hypothesis. ``settings(timeout=unlimited)`` will *not* cause a deprecation warning. * There is a new health check, ``hung_test``, which will trigger after a test has been running for five minutes if it is not suppressed. .. _v3.15.0: ------------------- 3.15.0 - 2017-08-04 ------------------- This release deprecates two strategies, ``choices()`` and ``streaming()``. Both of these are somewhat confusing to use and are entirely redundant since the introduction of the :func:`~hypothesis.strategies.data` strategy for interactive drawing in tests, and their use should be replaced with direct use of :func:`~hypothesis.strategies.data` instead. .. _v3.14.2: ------------------- 3.14.2 - 2017-08-03 ------------------- This fixes a bug where Hypothesis would not work correctly on Python 2.7 if you had the :mod:`python:typing` module :pypi:`backport ` installed. .. _v3.14.1: ------------------- 3.14.1 - 2017-08-02 ------------------- This raises the maximum depth at which Hypothesis starts cutting off data generation to a more reasonable value which it is harder to hit by accident. 
This resolves (:issue:`751`), in which some examples which previously worked would start timing out, but it will also likely improve the data generation quality for complex data types. .. _v3.14.0: ------------------- 3.14.0 - 2017-07-23 ------------------- Hypothesis now understands inline type annotations (:issue:`293`): - If the target of :func:`~hypothesis.strategies.builds` has type annotations, a default strategy for missing required arguments is selected based on the type. Type-based strategy selection will only override a default if you pass :const:`hypothesis.infer` as a keyword argument. - If :func:`@given ` wraps a function with type annotations, you can pass :const:`~hypothesis.infer` as a keyword argument and the appropriate strategy will be substituted. - You can check what strategy will be inferred for a type with the new :func:`~hypothesis.strategies.from_type` function. - :func:`~hypothesis.strategies.register_type_strategy` teaches Hypothesis which strategy to infer for custom or unknown types. You can provide a strategy, or for more complex cases a function which takes the type and returns a strategy. .. _v3.13.1: ------------------- 3.13.1 - 2017-07-20 ------------------- This is a bug fix release for :issue:`514` - Hypothesis would continue running examples after a :class:`~python:unittest.SkipTest` exception was raised, including printing a falsifying example. Skip exceptions from the standard :mod:`python:unittest` module, and ``pytest``, ``nose``, or ``unittest2`` modules now abort the test immediately without printing output. .. _v3.13.0: ------------------- 3.13.0 - 2017-07-16 ------------------- This release has two major aspects to it: The first is the introduction of :func:`~hypothesis.strategies.deferred`, which allows more natural definition of recursive (including mutually recursive) strategies. The second is a number of engine changes designed to support this sort of strategy better. 
These should have a knock-on effect of also improving the performance of any existing strategies that currently generate a lot of data or involve heavy nesting by reducing their typical example size. .. _v3.12.0: ------------------- 3.12.0 - 2017-07-07 ------------------- This release makes some major internal changes to how Hypothesis represents data internally, as a prelude to some major engine changes that should improve data quality. There are no API changes, but it's a significant enough internal change that a minor version bump seemed warranted. User facing impact should be fairly mild, but includes: * All existing examples in the database will probably be invalidated. Hypothesis handles this automatically, so you don't need to do anything, but if you see all your examples disappear that's why. * Almost all data distributions have changed significantly. Possibly for the better, possibly for the worse. This may result in new bugs being found, but it may also result in Hypothesis being unable to find bugs it previously did. * Data generation may be somewhat faster if your existing bottleneck was in draw_bytes (which is often the case for large examples). * Shrinking will probably be slower, possibly significantly. If you notice any effects you consider to be a significant regression, please open an issue about them. .. _v3.11.6: ------------------- 3.11.6 - 2017-06-19 ------------------- This release involves no functionality changes, but is the first to ship wheels as well as an sdist. .. _v3.11.5: ------------------- 3.11.5 - 2017-06-18 ------------------- This release provides a performance improvement to shrinking. For cases where there is some non-trivial "boundary" value (e.g. the bug happens for all values greater than some other value), shrinking should now be substantially faster. Other types of bug will likely see improvements too. 
This may also result in some changes to the quality of the final examples - it may sometimes be better, but is more likely to get slightly worse in some edge cases. If you see any examples where this happens in practice, please report them. .. _v3.11.4: ------------------- 3.11.4 - 2017-06-17 ------------------- This is a bugfix release: Hypothesis now prints explicit examples when running in verbose mode. (:issue:`313`) .. _v3.11.3: ------------------- 3.11.3 - 2017-06-11 ------------------- This is a bugfix release: Hypothesis no longer emits a warning if you try to use :func:`~hypothesis.strategies.sampled_from` with :class:`python:collections.OrderedDict`. (:issue:`688`) .. _v3.11.2: ------------------- 3.11.2 - 2017-06-10 ------------------- This is a documentation release. Several outdated snippets have been updated or removed, and many cross-references are now hyperlinks. .. _v3.11.1: ------------------- 3.11.1 - 2017-05-28 ------------------- This is a minor ergonomics release. Tracebacks shown by pytest no longer include Hypothesis internals for test functions decorated with :func:`@given `. .. _v3.11.0: ------------------- 3.11.0 - 2017-05-23 ------------------- This is a feature release, adding datetime-related strategies to the core strategies. :func:`~hypothesis.extra.pytz.timezones` allows you to sample pytz timezones from the Olsen database. Use directly in a recipe for tz-aware datetimes, or compose with :func:`~hypothesis.strategies.none` to allow a mix of aware and naive output. The new :func:`~hypothesis.strategies.dates`, :func:`~hypothesis.strategies.times`, :func:`~hypothesis.strategies.datetimes`, and :func:`~hypothesis.strategies.timedeltas` strategies are all constrained by objects of their type. This means that you can generate dates bounded by a single day (i.e. a single date), or datetimes constrained to the microsecond. 
:func:`~hypothesis.strategies.times` and :func:`~hypothesis.strategies.datetimes` take an optional ``timezones=`` argument, which defaults to :func:`~hypothesis.strategies.none` for naive times. You can use our extra strategy based on pytz, or roll your own timezones strategy with dateutil or even the standard library. The old ``dates``, ``times``, and ``datetimes`` strategies in ``hypothesis.extra.datetimes`` are deprecated in favor of the new core strategies, which are more flexible and have no dependencies. .. _v3.10.0: ------------------- 3.10.0 - 2017-05-22 ------------------- Hypothesis now uses :func:`python:inspect.getfullargspec` internally. On Python 2, there are no visible changes. On Python 3 :func:`@given ` and :func:`@composite ` now preserve :pep:`3107` annotations on the decorated function. Keyword-only arguments are now either handled correctly (e.g. :func:`@composite `), or caught in validation instead of silently discarded or raising an unrelated error later (e.g. :func:`@given `). .. _v3.9.1: ------------------ 3.9.1 - 2017-05-22 ------------------ This is a bugfix release: the default field mapping for a DateTimeField in the Django extra now respects the ``USE_TZ`` setting when choosing a strategy. .. _v3.9.0: ------------------ 3.9.0 - 2017-05-19 ------------------ This is a feature release, expanding the capabilities of the :func:`~hypothesis.strategies.decimals` strategy. * The new (optional) ``places`` argument allows you to generate decimals with a certain number of places (e.g. cents, thousandths, satoshis). * If allow_infinity is None, setting min_value no longer excludes positive infinity and setting max_value no longer excludes negative infinity. * All of ``NaN``, ``-NaN``, ``sNaN``, and ``-sNaN`` may now be drawn if allow_nan is True, or if allow_nan is None and min_value or max_value is None. * min_value and max_value may be given as decimal strings, e.g. ``"1.234"``. ..
_v3.8.5: ------------------ 3.8.5 - 2017-05-16 ------------------ Hypothesis now imports :mod:`python:sqlite3` when a SQLite database is used, rather than at module load, improving compatibility with Python implementations compiled without SQLite support (such as BSD or Jython). .. _v3.8.4: ------------------ 3.8.4 - 2017-05-16 ------------------ This is a compatibility bugfix release. :func:`~hypothesis.strategies.sampled_from` no longer raises a deprecation warning when sampling from an :class:`python:enum.Enum`, as all enums have a reliable iteration order. .. _v3.8.3: ------------------ 3.8.3 - 2017-05-09 ------------------ This release removes a version check for older versions of :pypi:`pytest` when using the Hypothesis pytest plugin. The pytest plugin will now run unconditionally on all versions of pytest. This breaks compatibility with any version of pytest prior to 2.7.0 (which is more than two years old). The primary reason for this change is that the version check was a frequent source of breakage when pytest changed their versioning scheme. If you are not working on pytest itself and are not running a very old version of it, this release probably doesn't affect you. .. _v3.8.2: ------------------ 3.8.2 - 2017-04-26 ------------------ This is a code reorganisation release that moves some internal test helpers out of the main source tree so as to not have changes to them trigger releases in future. .. _v3.8.1: ------------------ 3.8.1 - 2017-04-26 ------------------ This is a documentation release. Almost all code examples are now doctests checked in CI, eliminating stale examples. .. _v3.8.0: ------------------ 3.8.0 - 2017-04-23 ------------------ This is a feature release, adding the :func:`~hypothesis.strategies.iterables` strategy, equivalent to ``lists(...).map(iter)`` but with a much more useful repr. You can use this strategy to check that code doesn't accidentally depend on sequence properties such as indexing support or repeated iteration. ..
_v3.7.4: ------------------ 3.7.4 - 2017-04-22 ------------------ This patch fixes a bug in :ref:`3.7.3 `, where using :func:`@example ` and a pytest fixture in the same test could cause the test to fail to fill the arguments, and throw a TypeError. .. _v3.7.3: ------------------ 3.7.3 - 2017-04-21 ------------------ This release should include no user visible changes and is purely a refactoring release. This modularises the behaviour of the core :func:`~hypothesis.given` function, breaking it up into smaller and more accessible parts, but its actual behaviour should remain unchanged. .. _v3.7.2: ------------------ 3.7.2 - 2017-04-21 ------------------ This reverts an undocumented change in :ref:`3.7.1 ` which broke installation on debian stable: The specifier for the hypothesis[django] extra\_requires had introduced a wild card, which was not supported on the default version of pip. .. _v3.7.1: ------------------ 3.7.1 - 2017-04-21 ------------------ This is a bug fix and internal improvements release. * In particular Hypothesis now tracks a tree of where it has already explored. This allows it to avoid some classes of duplicate examples, and significantly improves the performance of shrinking failing examples by allowing it to skip some shrinks that it can determine can't possibly work. * Hypothesis will no longer seed the global random arbitrarily unless you have asked it to using :py:meth:`~hypothesis.strategies.random_module` * Shrinking would previously have not worked correctly in some special cases on Python 2, and would have resulted in suboptimal examples. .. _v3.7.0: ------------------ 3.7.0 - 2017-03-20 ------------------ This is a feature release. New features: * Rule based stateful testing now has an :func:`@invariant ` decorator that specifies methods that are run after init and after every step, allowing you to encode properties that should be true at all times. Thanks to Tom Prince for this feature. 
* The :func:`~hypothesis.strategies.decimals` strategy now supports ``allow_nan`` and ``allow_infinity`` flags. * There are :ref:`significantly more strategies available for numpy `, including for generating arbitrary data types. Thanks to Zac Hatfield Dodds for this feature. * When using the :func:`~hypothesis.strategies.data` strategy you can now add a label as an argument to ``draw()``, which will be printed along with the value when an example fails. Thanks to Peter Inglesby for this feature. Bug fixes: * Bug fix: :func:`~hypothesis.strategies.composite` now preserves functions' docstrings. * The build is now reproducible and doesn't depend on the path you build it from. Thanks to Chris Lamb for this feature. * numpy strategies for the void data type did not work correctly. Thanks to Zac Hatfield Dodds for this fix. There have also been a number of performance optimizations: * The :func:`~hypothesis.strategies.permutations` strategy is now significantly faster to use for large lists (the underlying algorithm has gone from O(n^2) to O(n)). * Shrinking of failing test cases should have got significantly faster in some circumstances where it was previously struggling for a long time. * Example generation now involves less indirection, which results in a small speedup in some cases (small enough that you won't really notice it except in pathological cases). .. _v3.6.1: ------------------ 3.6.1 - 2016-12-20 ------------------ This release fixes a dependency problem and makes some small behind the scenes improvements. * The fake-factory dependency was renamed to faker. If you were depending on it through hypothesis[django] or hypothesis[fake-factory] without pinning it yourself then it would have failed to install properly. This release changes it so that hypothesis[fakefactory] (which can now also be installed as hypothesis[faker]) will install the renamed faker package instead. 
* This release also removed the dependency of hypothesis[django] on hypothesis[fakefactory] - it was only being used for emails. These now use a custom strategy that isn't from fakefactory. As a result you should also see performance improvements of tests which generated User objects or other things with email fields, as well as better shrinking of email addresses. * The distribution of code using nested calls to :func:`~hypothesis.strategies.one_of` or the ``|`` operator for combining strategies has been improved, as branches are now flattened to give a more uniform distribution. * Examples using :func:`~hypothesis.strategies.composite` or ``.flatmap`` should now shrink better. In particular this will affect things which work by first generating a length and then generating that many items, which have historically not shrunk very well. .. _v3.6.0: ------------------ 3.6.0 - 2016-10-31 ------------------ This release reverts Hypothesis to its old pretty printing of lambda functions based on attempting to extract the source code rather than decompile the bytecode. This is unfortunately slightly inferior in some cases and may result in you occasionally seeing things like ``lambda x: <unknown>`` in statistics reports and strategy reprs. This removes the dependencies on uncompyle6, xdis and spark-parser. The reason for this is that the new functionality was based on uncompyle6, which turns out to introduce a hidden GPLed dependency - it in turn depended on xdis, and although the library was licensed under the MIT license, it contained some GPL licensed source code and thus should have been released under the GPL. My interpretation is that Hypothesis itself was never in violation of the GPL (because the license it is under, the Mozilla Public License v2, is fully compatible with being included in a GPL licensed work), but I have not consulted a lawyer on the subject.
Regardless of the answer to this question, adding a GPLed dependency will likely cause a lot of users of Hypothesis to inadvertently be in violation of the GPL. As a result, if you are running Hypothesis 3.5.x you really should upgrade to this release immediately. .. _v3.5.3: ------------------ 3.5.3 - 2016-10-05 ------------------ This is a bug fix release. Bugs fixed: * If the same test was running concurrently in two processes and there were examples already in the test database which no longer failed, Hypothesis would sometimes fail with a FileNotFoundError (IOError on Python 2) because an example it was trying to read was deleted before it was read. (:issue:`372`). * Drawing from an :func:`~hypothesis.strategies.integers` strategy with both a min_value and a max_value would reject too many examples needlessly. Now it repeatedly redraws until satisfied. (:pull:`366`. Thanks to Calen Pennington for the contribution). .. _v3.5.2: ------------------ 3.5.2 - 2016-09-24 ------------------ This is a bug fix release. * The Hypothesis pytest plugin broke pytest support for doctests. Now it doesn't. .. _v3.5.1: ------------------ 3.5.1 - 2016-09-23 ------------------ This is a bug fix release. * Hypothesis now runs cleanly in -B and -BB modes, avoiding mixing bytes and unicode. * :class:`python:unittest.TestCase` tests would not have shown up in the new statistics mode. Now they do. * Similarly, stateful tests would not have shown up in statistics and now they do. * Statistics now print with pytest node IDs (the names you'd get in pytest verbose mode). .. _v3.5.0: ------------------ 3.5.0 - 2016-09-22 ------------------ This is a feature release. * :func:`~hypothesis.strategies.fractions` and :func:`~hypothesis.strategies.decimals` strategies now support min_value and max_value parameters. Thanks go to Anne Mulhern for the development of this feature. 
* The Hypothesis pytest plugin now supports a ``--hypothesis-show-statistics`` parameter that gives detailed statistics about the tests that were run. Huge thanks to Jean-Louis Fuchs and Adfinis-SyGroup for funding the development of this feature. * There is a new :func:`~hypothesis.event` function that can be used to add custom statistics. Additionally there have been some minor bug fixes: * In some cases Hypothesis should produce fewer duplicate examples (this will mostly only affect cases with a single parameter). * :pypi:`pytest` command line parameters are now under an option group for Hypothesis (thanks to David Keijser for fixing this) * Hypothesis would previously error if you used :pep:`3107` function annotations on your tests under Python 3.4. * The repr of many strategies using lambdas has been improved to include the lambda body (this was previously supported in many but not all cases). .. _v3.4.2: ------------------ 3.4.2 - 2016-07-13 ------------------ This is a bug fix release, fixing a number of problems with the settings system: * Test functions defined using :func:`@given ` can now be called from other threads (:issue:`337`) * Attempting to delete a settings property would previously have silently done the wrong thing. Now it raises an AttributeError. * Creating a settings object with a custom database_file parameter was silently getting ignored and the default was being used instead. Now it's not. .. _v3.4.1: ------------------ 3.4.1 - 2016-07-07 ------------------ This is a bug fix release for a single bug: * On Windows when running two Hypothesis processes in parallel (e.g. using :pypi:`pytest-xdist`) they could race with each other and one would raise an exception due to the non-atomic nature of file renaming on Windows and the fact that you can't rename over an existing file. This is now fixed. .. 
_v3.4.0: ------------------ 3.4.0 - 2016-05-27 ------------------ This release is entirely provided by `Lucas Wiman `_: Strategies constructed by the Django extra will now respect much more of Django's validations out of the box. Wherever possible, :meth:`~django:django.db.models.Model.full_clean` should succeed. In particular: * The max_length, blank and choices kwargs are now respected. * Add support for DecimalField. * If a field includes validators, the list of validators is used to filter the field strategy. .. _v3.3.0: ------------------ 3.3.0 - 2016-05-27 ------------------ This release went wrong and is functionally equivalent to :ref:`3.2.0 `. Ignore it. .. _v3.2.0: ------------------ 3.2.0 - 2016-05-19 ------------------ This is a small single-feature release: * All tests using :func:`@given ` now fix the global random seed. This removes the health check for that. If a non-zero seed is required for the final falsifying example, it will be reported. Otherwise Hypothesis will assume randomization was not a significant factor for the test and be silent on the subject. If you use :func:`~hypothesis.strategies.random_module` this will continue to work and will always display the seed. .. _v3.1.3: ------------------ 3.1.3 - 2016-05-01 ------------------ Single bug fix release: * Another charmap problem. In :ref:`3.1.2 ` :func:`~hypothesis.strategies.text` and :func:`~hypothesis.strategies.characters` would break on systems which had ``/tmp`` mounted on a different partition than the Hypothesis storage directory (usually in home). This fixes that. .. _v3.1.2: ------------------ 3.1.2 - 2016-04-30 ------------------ Single bug fix release: * Anything which used a :func:`~hypothesis.strategies.text` or :func:`~hypothesis.strategies.characters` strategy was broken on Windows and I hadn't updated appveyor to use the new repository location so I didn't notice. This is now fixed and windows support should work correctly. ..
_v3.1.1: ------------------ 3.1.1 - 2016-04-29 ------------------ Minor bug fix release. * Fix concurrency issue when running tests that use :func:`~hypothesis.strategies.text` from multiple processes at once (:issue:`302`, thanks to Alex Chan). * Improve performance of code using :func:`~hypothesis.strategies.lists` with max_size (thanks to Cristi Cobzarenco). * Fix install on Python 2 with ancient versions of pip so that it installs the :pypi:`enum34` backport (thanks to Donald Stufft for telling me how to do this). * Remove duplicated __all__ exports from hypothesis.strategies (thanks to Piët Delport). * Update headers to point to new repository location. * Allow use of strategies that can't be used in ``find()`` (e.g. ``choices()``) in stateful testing. .. _v3.1.0: ------------------ 3.1.0 - 2016-03-06 ------------------ * Add a :func:`~hypothesis.strategies.nothing` strategy that never successfully generates values. * :func:`~hypothesis.strategies.sampled_from` and :func:`~hypothesis.strategies.one_of` can both now be called with an empty argument list, in which case they also never generate any values. * :func:`~hypothesis.strategies.one_of` may now be called with a single argument that is a collection of strategies as well as varargs. * Add a :func:`~hypothesis.strategies.runner` strategy which returns the instance of the current test object if there is one. * 'Bundle' for RuleBasedStateMachine is now a normal(ish) strategy and can be used as such. * Tests using RuleBasedStateMachine should now shrink significantly better. * Hypothesis now uses a pretty-printing library internally, compatible with IPython's pretty printing protocol (actually using the same code). This may improve the quality of output in some cases. * Add a 'phases' setting that allows more fine grained control over which parts of the process Hypothesis runs. * Add a suppress_health_check setting which allows you to turn off specific health checks in a fine grained manner.
* Fix a bug where lists of non fixed size would always draw one more element than they included. This mostly didn't matter, but it would cause problems with empty strategies or ones with side effects. * Add a mechanism to the Django model generator to allow you to explicitly request the default value (thanks to Jeremy Thurgood for this one). .. _v3.0.5: ------------------ 3.0.5 - 2016-02-25 ------------------ * Fix a bug where Hypothesis would error on :pypi:`pytest` development versions. .. _v3.0.4: ------------------ 3.0.4 - 2016-02-24 ------------------ * Fix a bug where Hypothesis would error when running on Python 2.7.3 or earlier because it was trying to pass a :class:`python:bytearray` object to :func:`python:struct.unpack` (which is only supported since 2.7.4). .. _v3.0.3: ------------------ 3.0.3 - 2016-02-23 ------------------ * Fix version parsing of pytest to work with pytest release candidates. * More general handling of the health check problem where things could fail because of a cache miss - now one "free" example is generated before the start of the health check run. .. _v3.0.2: ------------------ 3.0.2 - 2016-02-18 ------------------ * Under certain circumstances, strategies involving :func:`~hypothesis.strategies.text` buried inside some other strategy (e.g. ``text().filter(...)`` or ``recursive(text(), ...)``) would cause a test to fail its health checks the first time it ran. This was caused by having to compute some related data and cache it to disk. On travis or anywhere else where the ``.hypothesis`` directory was recreated this would have caused the tests to fail their health check on every run. This is now fixed for all the known cases, although there could be others lurking. .. _v3.0.1: ------------------ 3.0.1 - 2016-02-18 ------------------ * Fix a case where it was possible to trigger an "Unreachable" assertion when running certain flaky stateful tests.
* Improve shrinking of large stateful tests by eliminating a case where it was hard to delete early steps. * Improve efficiency of drawing :func:`binary(min_size=n, max_size=n) ` significantly by providing a custom implementation for fixed size blocks that can bypass a lot of machinery. * Set default home directory based on the current working directory at the point Hypothesis is imported, not whenever the function first happens to be called. .. _v3.0.0: ------------------ 3.0.0 - 2016-02-17 ------------------ Codename: This really should have been 2.1. Externally this looks like a very small release. It has one small breaking change that probably doesn't affect anyone at all (some behaviour that never really worked correctly is now outright forbidden) but necessitated a major version bump and one visible new feature. Internally this is a complete rewrite. Almost nothing other than the public API is the same. New features: * Addition of :func:`~hypothesis.strategies.data` strategy which allows you to draw arbitrary data interactively within the test. * New "exploded" database format which allows you to more easily check the example database into a source repository while supporting merging. * Better management of how examples are saved in the database. * Health checks will now raise as errors when they fail. It was too easy to have the warnings be swallowed entirely. New limitations: * ``choices()`` and ``streaming()`` strategies may no longer be used with ``find()``. Neither may :func:`~hypothesis.strategies.data` (this is the change that necessitated a major version bump). Feature removal: * The ForkingTestCase executor has gone away. It may return in some more working form at a later date. Performance improvements: * A new model which allows flatmap, composite strategies and stateful testing to perform *much* better. They should also be more reliable. * Filtering may in some circumstances have improved significantly.
This will help especially in cases where you have lots of values with individual filters on them, such as lists(x.filter(...)). * Modest performance improvements to the general test runner by avoiding expensive operations In general your tests should have got faster. If they've instead got significantly slower, I'm interested in hearing about it. Data distribution: The data distribution should have changed significantly. This may uncover bugs the previous version missed. It may also miss bugs the previous version could have uncovered. Hypothesis is now producing less strongly correlated data than it used to, but the correlations are extended over more of the structure. Shrinking: Shrinking quality should have improved. In particular Hypothesis can now perform simultaneous shrinking of separate examples within a single test (previously it was only able to do this for elements of a single collection). In some cases performance will have improved, in some cases it will have got worse but generally shouldn't have by much. .. _v2.0.0: ------------------ 2.0.0 - 2016-01-10 ------------------ Codename: A new beginning This release cleans up all of the legacy that accrued in the course of Hypothesis 1.0. These are mostly things that were emitting deprecation warnings in 1.19.0, but there were a few additional changes. In particular: * non-strategy values will no longer be converted to strategies when used in given or find. * FailedHealthCheck is now an error and not a warning. * Handling of non-ascii reprs in user types have been simplified by using raw strings in more places in Python 2. * given no longer allows mixing positional and keyword arguments. * given no longer works with functions with defaults. * given no longer turns provided arguments into defaults - they will not appear in the argspec at all. * the basic() strategy no longer exists. * the n_ary_tree strategy no longer exists. * the average_list_length setting no longer exists. 
Note: If you're using recursive() this will cause you a significant slow down. You should pass explicit average_size parameters to collections in recursive calls. * @rule can no longer be applied to the same method twice. * Python 2.6 and 3.3 are no longer officially supported, although in practice they still work fine. This also includes two non-deprecation changes: * given's keyword arguments no longer have to be the rightmost arguments and can appear anywhere in the method signature. * The max_shrinks setting would sometimes not have been respected. .. _v1.19.0: ------------------- 1.19.0 - 2016-01-09 ------------------- Codename: IT COMES This release heralds the beginning of a new and terrible age of Hypothesis 2.0. Its primary purpose is some final deprecations prior to said release. The goal is that if your code emits no warnings under this release then it will probably run unchanged under Hypothesis 2.0 (there are some caveats to this: 2.0 will drop support for some Python versions, and if you're using internal APIs then as usual that may break without warning). It does have two new features: * New @seed() decorator which allows you to manually seed a test. This may be harmlessly combined with and overrides the derandomize setting. * settings objects may now be used as a decorator to fix those settings to a particular @given test. API changes (old usage still works but is deprecated): * Settings has been renamed to settings (lower casing) in order to make the decorator usage more natural. * Functions for the storage directory that were in hypothesis.settings are now in a new hypothesis.configuration module. Additional deprecations: * the average_list_length setting has been deprecated in favour of being explicit. * the basic() strategy has been deprecated as it is impossible to support it under a Conjecture based model, which will hopefully be implemented at some point in the 2.x series.
* the n_ary_tree strategy (which was never actually part of the public API) has been deprecated. * Passing settings or random as keyword arguments to given is deprecated (use the new functionality instead) Bug fixes: * No longer emit PendingDeprecationWarning for __iter__ and StopIteration in streaming() values. * When running in health check mode with non strict, don't print quite so many errors for an exception in reify. * When an assumption made in a test or a filter is flaky, tests will now raise Flaky instead of UnsatisfiedAssumption. .. _v1.18.1: ------------------- 1.18.1 - 2015-12-22 ------------------- Two behind the scenes changes: * Hypothesis will no longer write generated code to the file system. This will improve performance on some systems (e.g. if you're using `PythonAnywhere `_ which is running your code from NFS) and prevent some annoying interactions with auto-restarting systems. * Hypothesis will cache the creation of some strategies. This can significantly improve performance for code that uses flatmap or composite and thus has to instantiate strategies a lot. .. _v1.18.0: ------------------- 1.18.0 - 2015-12-21 ------------------- Features: * Tests and find are now explicitly seeded off the global random module. This means that if you nest one inside the other you will now get a health check error. It also means that you can control global randomization by seeding random. * There is a new random_module() strategy which seeds the global random module for you and handles things so that you don't get a health check warning if you use it inside your tests. * floats() now accepts two new arguments: allow\_nan and allow\_infinity. These default to the old behaviour, but when set to False will do what the names suggest. 
Bug fixes: * Fix a bug where tests that used text() on Python 3.4+ would not actually be deterministic even when explicitly seeded or using the derandomize mode, because generation depended on dictionary iteration order which was affected by hash randomization. * Fix a bug where with complicated strategies the timing of the initial health check could affect the seeding of the subsequent test, which would also render supposedly deterministic tests non-deterministic in some scenarios. * In some circumstances flatmap() could get confused by two structurally similar things it could generate and would produce a flaky test where the first time it produced an error but the second time it produced the other value, which was not an error. The same bug was presumably also possible in composite(). * flatmap() and composite() initial generation should now be moderately faster. This will be particularly noticeable when you have many values drawn from the same strategy in a single run, e.g. constructs like lists(s.flatmap(f)). Shrinking performance *may* have suffered, but this didn't actually produce an interestingly worse result in any of the standard scenarios tested. .. _v1.17.1: ------------------- 1.17.1 - 2015-12-16 ------------------- A small bug fix release, which fixes the fact that the 'note' function could not be used on tests which used the @example decorator to provide explicit examples. .. _v1.17.0: ------------------- 1.17.0 - 2015-12-15 ------------------- This is actually the same release as 1.16.1, but 1.16.1 has been pulled because it contains the following additional change that was not intended to be in a patch release (it's perfectly stable, but is a larger change that should have required a minor version bump): * Hypothesis will now perform a series of "health checks" as part of running your tests. 
These detect and warn about some common error conditions that people often run into which wouldn't necessarily have caused the test to fail but would cause e.g. degraded performance or confusing results. .. _v1.16.1: ------------------- 1.16.1 - 2015-12-14 ------------------- Note: This release has been removed. A small bugfix release that allows bdists for Hypothesis to be built under 2.7 - the compat3.py file which had Python 3 syntax wasn't intended to be loaded under Python 2, but when building a bdist it was. In particular this would break running setup.py test. .. _v1.16.0: ------------------- 1.16.0 - 2015-12-08 ------------------- There are no public API changes in this release but it includes a behaviour change that I wasn't comfortable putting in a patch release. * Functions from hypothesis.strategies will no longer raise InvalidArgument on bad arguments. Instead the same errors will be raised when a test using such a strategy is run. This may improve startup time in some cases, but the main reason for it is so that errors in strategies won't cause errors in loading, and it can interact correctly with things like pytest.mark.skipif. * Errors caused by accidentally invoking the legacy API are now much less confusing, although still throw NotImplementedError. * hypothesis.extra.django is 1.9 compatible. * When tests are run with max_shrinks=0 this will now still rerun the test on failure and will no longer print "Trying example:" before each run. Additionally note() will now work correctly when used with max_shrinks=0. .. _v1.15.0: ------------------- 1.15.0 - 2015-11-24 ------------------- A release with two new features. * A 'characters' strategy for more flexible generation of text with particular character ranges and types, kindly contributed by `Alexander Shorin `_. * Add support for preconditions to the rule based stateful testing. Kindly contributed by `Christopher Armstrong `_ .. 
_v1.14.0: ------------------- 1.14.0 - 2015-11-01 ------------------- New features: * Add 'note' function which lets you include additional information in the final test run's output. * Add 'choices' strategy which gives you a choice function that emulates random.choice. * Add 'uuid' strategy that generates UUIDs' * Add 'shared' strategy that lets you create a strategy that just generates a single shared value for each test run Bugs: * Using strategies of the form streaming(x.flatmap(f)) with find or in stateful testing would have caused InvalidArgument errors when the resulting values were used (because code that expected to only be called within a test context would be invoked). .. _v1.13.0: ------------------- 1.13.0 - 2015-10-29 ------------------- This is quite a small release, but deprecates some public API functions and removes some internal API functionality so gets a minor version bump. * All calls to the 'strategy' function are now deprecated, even ones which pass just a SearchStrategy instance (which is still a no-op). * Never documented hypothesis.extra entry_points mechanism has now been removed ( it was previously how hypothesis.extra packages were loaded and has been deprecated and unused for some time) * Some corner cases that could previously have produced an OverflowError when simplifying failing cases using hypothesis.extra.datetimes (or dates or times) have now been fixed. * Hypothesis load time for first import has been significantly reduced - it used to be around 250ms (on my SSD laptop) and now is around 100-150ms. This almost never matters but was slightly annoying when using it in the console. * hypothesis.strategies.randoms was previously missing from \_\_all\_\_. .. _v1.12.0: ------------------- 1.12.0 - 2015-10-18 ------------------- * Significantly improved performance of creating strategies using the functions from the hypothesis.strategies module by deferring the calculation of their repr until it was needed. 
This is unlikely to have been a performance issue for you
_v1.11.3: ------------------- 1.11.3 - 2015-09-23 ------------------- * Better argument validation for datetimes() strategy - previously setting max_year < datetime.MIN_YEAR or min_year > datetime.MAX_YEAR would not have raised an InvalidArgument error and instead would have behaved confusingly. * Compatibility with being run on pytest < 2.7 (achieved by disabling the plugin). .. _v1.11.2: ------------------- 1.11.2 - 2015-09-23 ------------------- Bug fixes: * Settings(database=my_db) would not be correctly inherited when used as a default setting, so that newly created settings would use the database_file setting and create an SQLite example database. * Settings.default.database = my_db would previously have raised an error and now works. * Timeout could sometimes be significantly exceeded if during simplification there were a lot of examples tried that didn't trigger the bug. * When loading a heavily simplified example using a basic() strategy from the database this could cause Python to trigger a recursion error. * Remove use of deprecated API in pytest plugin so as to not emit warning Misc: * hypothesis-pytest is now part of hypothesis core. This should have no externally visible consequences, but you should update your dependencies to remove hypothesis-pytest and depend on only Hypothesis. * Better repr for hypothesis.extra.datetimes() strategies. * Add .close() method to abstract base class for Backend (it was already present in the main implementation). .. _v1.11.1: ------------------- 1.11.1 - 2015-09-16 ------------------- Bug fixes: * When running Hypothesis tests in parallel (e.g. using pytest-xdist) there was a race condition caused by code generation. * Example databases are now cached per thread so as to not use sqlite connections from multiple threads. This should make Hypothesis now entirely thread safe. * floats() with only min_value or max_value set would have had a very bad distribution. 
* Running on 3.5, Hypothesis would have emitted deprecation warnings because of use of inspect.getargspec .. _v1.11.0: ------------------- 1.11.0 - 2015-08-31 ------------------- * text() with a non-string alphabet would have used the repr() of the the alphabet instead of its contexts. This is obviously silly. It now works with any sequence of things convertible to unicode strings. * @given will now work on methods whose definitions contains no explicit positional arguments, only varargs (:issue:`118`). This may have some knock on effects because it means that @given no longer changes the argspec of functions other than by adding defaults. * Introduction of new @composite feature for more natural definition of strategies you'd previously have used flatmap for. .. _v1.10.6: ------------------- 1.10.6 - 2015-08-26 ------------------- Fix support for fixtures on Django 1.7. .. _v1.10.4: ------------------- 1.10.4 - 2015-08-21 ------------------- Tiny bug fix release: * If the database_file setting is set to None, this would have resulted in an error when running tests. Now it does the same as setting database to None. .. _v1.10.3: ------------------- 1.10.3 - 2015-08-19 ------------------- Another small bug fix release. * lists(elements, unique_by=some_function, min_size=n) would have raised a ValidationError if n > Settings.default.average_list_length because it would have wanted to use an average list length shorter than the minimum size of the list, which is impossible. Now it instead defaults to twice the minimum size in these circumstances. * basic() strategy would have only ever produced at most ten distinct values per run of the test (which is bad if you e.g. have it inside a list). This was obviously silly. It will now produce a much better distribution of data, both duplicated and non duplicated. .. _v1.10.2: ------------------- 1.10.2 - 2015-08-19 ------------------- This is a small bug fix release: * star imports from hypothesis should now work correctly. 
* example quality for examples using flatmap will be better, as the way it had previously been implemented was causing problems where Hypothesis was erroneously labelling some examples as being duplicates. .. _v1.10.0: ------------------- 1.10.0 - 2015-08-04 ------------------- This is just a bugfix and performance release, but it changes some semi-public APIs, hence the minor version bump. * Significant performance improvements for strategies which are one\_of() many branches. In particular this included recursive() strategies. This should take the case where you use one recursive() strategy as the base strategy of another from unusably slow (tens of seconds per generated example) to reasonably fast. * Better handling of just() and sampled_from() for values which have an incorrect \_\_repr\_\_ implementation that returns non-ASCII unicode on Python 2. * Better performance for flatmap from changing the internal morpher API to be significantly less general purpose. * Introduce a new semi-public BuildContext/cleanup API. This allows strategies to register cleanup activities that should run once the example is complete. Note that this will interact somewhat weirdly with find. * Better simplification behaviour for streaming strategies. * Don't error on lambdas which use destructuring arguments in Python 2. * Add some better reprs for a few strategies that were missing good ones. * The Random instances provided by randoms() are now copyable. * Slightly more debugging information about simplify when using a debug verbosity level. * Support using given for functions with varargs, but not passing arguments to it as positional. .. _v1.9.0: ------------------ 1.9.0 - 2015-07-27 ------------------ Codename: The great bundling. This release contains two fairly major changes. The first is the deprecation of the hypothesis-extra mechanism. 
From now on all the packages that were previously bundled under it, other than hypothesis-pytest (which is a different beast and will remain separate), are shipped as part of the core Hypothesis package.
_v1.8.1: ------------------ 1.8.1 - 2015-07-17 ------------------ This is a small release that contains a workaround for people who have bad reprs returning non ascii text on Python 2.7. This is not a bug fix for Hypothesis per se because that's not a thing that is actually supposed to work, but Hypothesis leans more heavily on repr than is typical so it's worth having a workaround for. .. _v1.8.0: ------------------ 1.8.0 - 2015-07-16 ------------------ New features: * Much more sensible reprs for strategies, especially ones that come from hypothesis.strategies. These should now have as reprs python code that would produce the same strategy. * lists() accepts a unique_by argument which forces the generated lists to be only contain elements unique according to some function key (which must return a hashable value). * Better error messages from flaky tests to help you debug things. Mostly invisible implementation details that may result in finding new bugs in your code: * Sets and dictionary generation should now produce a better range of results. * floats with bounds now focus more on 'critical values', trying to produce values at edge cases. * flatmap should now have better simplification for complicated cases, as well as generally being (I hope) more reliable. Bug fixes: * You could not previously use assume() if you were using the forking executor. .. _v1.7.2: ------------------ 1.7.2 - 2015-07-10 ------------------ This is purely a bug fix release: * When using floats() with stale data in the database you could sometimes get values in your tests that did not respect min_value or max_value. * When getting a Flaky error from an unreliable test it would have incorrectly displayed the example that caused it. * 2.6 dependency on backports was incorrectly specified. 
This would only have caused you problems if you were building a universal wheel from Hypothesis, which is not how Hypothesis ships, so unless you're explicitly building wheels for your dependencies and support Python 2.6 plus a later version of Python this probably would never have affected you. * If you use flatmap in a way that the strategy on the right hand side depends sensitively on the left hand side you may have occasionally seen Flaky errors caused by producing unreliable examples when minimizing a bug. This use case may still be somewhat fraught to be honest. This code is due a major rearchitecture for 1.8, but in the meantime this release fixes the only source of this error that I'm aware of. .. _v1.7.1: ------------------ 1.7.1 - 2015-06-29 ------------------ Codename: There is no 1.7.0. A slight technical hitch with a premature upload means there's was a yanked 1.7.0 release. Oops. The major feature of this release is Python 2.6 support. Thanks to Jeff Meadows for doing most of the work there. Other minor features * strategies now has a permutations() function which returns a strategy yielding permutations of values from a given collection. * if you have a flaky test it will print the exception that it last saw before failing with Flaky, even if you do not have verbose reporting on. * Slightly experimental git merge script available as "python -m hypothesis.tools.mergedbs". Instructions on how to use it in the docstring of that file. Bug fixes: * Better performance from use of filter. In particular tests which involve large numbers of heavily filtered strategies should perform a lot better. * floats() with a negative min_value would not have worked correctly (worryingly, it would have just silently failed to run any examples). This is now fixed. * tests using sampled\_from would error if the number of sampled elements was smaller than min\_satisfying\_examples. .. 
_v1.6.2: ------------------ 1.6.2 - 2015-06-08 ------------------ This is just a few small bug fixes: * Size bounds were not validated for values for a binary() strategy when reading examples from the database. * sampled\_from is now in __all__ in hypothesis.strategies * floats no longer consider negative integers to be simpler than positive non-integers * Small floating point intervals now correctly count members, so if you have a floating point interval so narrow there are only a handful of values in it, this will no longer cause an error when Hypothesis runs out of values. .. _v1.6.1: ------------------ 1.6.1 - 2015-05-21 ------------------ This is a small patch release that fixes a bug where 1.6.0 broke the use of flatmap with the deprecated API and assumed the passed in function returned a SearchStrategy instance rather than converting it to a strategy. .. _v1.6.0: ------------------ 1.6.0 - 2015-05-21 ------------------ This is a smallish release designed to fix a number of bugs and smooth out some weird behaviours. * Fix a critical bug in flatmap where it would reuse old strategies. If all your flatmap code was pure you're fine. If it's not, I'm surprised it's working at all. In particular if you want to use flatmap with django models, you desperately need to upgrade to this version. * flatmap simplification performance should now be better in some cases where it previously had to redo work. * Fix for a bug where invalid unicode data with surrogates could be generated during simplification (it was already filtered out during actual generation). * The Hypothesis database is now keyed off the name of the test instead of the type of data. This makes much more sense now with the new strategies API and is generally more robust. This means you will lose old examples on upgrade. * The database will now not delete values which fail to deserialize correctly, just skip them. This is to handle cases where multiple incompatible strategies share the same key. 
* find now also saves and loads values from the database, keyed off a hash of the function you're finding from. * Stateful tests now serialize and load values from the database. They should have before, really. This was a bug. * Passing a different verbosity level into a test would not have worked entirely correctly, leaving off some messages. This is now fixed. * Fix a bug where derandomized tests with unicode characters in the function body would error on Python 2.7. .. _v1.5.0: ------------------ 1.5.0 - 2015-05-14 ------------------ Codename: Strategic withdrawal. The purpose of this release is a radical simplification of the API for building strategies. Instead of the old approach of @strategy.extend and things that get converted to strategies, you just build strategies directly. The old method of defining strategies will still work until Hypothesis 2.0, because it's a major breaking change, but will now emit deprecation warnings. The new API is also a lot more powerful as the functions for defining strategies give you a lot of dials to turn. See :doc:`the updated data section ` for details. Other changes: * Mixing keyword and positional arguments in a call to @given is deprecated as well. * There is a new setting called 'strict'. When set to True, Hypothesis will raise warnings instead of merely printing them. Turning it on by default is inadvisable because it means that Hypothesis minor releases can break your code, but it may be useful for making sure you catch all uses of deprecated APIs. * max_examples in settings is now interpreted as meaning the maximum number of unique (ish) examples satisfying assumptions. A new setting max_iterations which defaults to a larger value has the old interpretation. * Example generation should be significantly faster due to a new faster parameter selection algorithm. This will mostly show up for simple data types - for complex ones the parameter selection is almost certainly dominated. 
* Simplification has some new heuristics that will tend to cut down on cases where it could previously take a very long time. * timeout would previously not have been respected in cases where there were a lot of duplicate examples. You probably wouldn't have previously noticed this because max_examples counted duplicates, so this was very hard to hit in a way that mattered. * A number of internal simplifications to the SearchStrategy API. * You can now access the current Hypothesis version as hypothesis.__version__. * A top level function is provided for running the stateful tests without the TestCase infrastructure. .. _v1.4.0: ------------------ 1.4.0 - 2015-05-04 ------------------ Codename: What a state. The *big* feature of this release is the new and slightly experimental stateful testing API. You can read more about that in :doc:`the appropriate section `. Two minor features the were driven out in the course of developing this: * You can now set settings.max_shrinks to limit the number of times Hypothesis will try to shrink arguments to your test. If this is set to <= 0 then Hypothesis will not rerun your test and will just raise the failure directly. Note that due to technical limitations if max_shrinks is <= 0 then Hypothesis will print *every* example it calls your test with rather than just the failing one. Note also that I don't consider settings max_shrinks to zero a sensible way to run your tests and it should really be considered a debug feature. * There is a new debug level of verbosity which is even *more* verbose than verbose. You probably don't want this. Breakage of semi-public SearchStrategy API: * It is now a required invariant of SearchStrategy that if u simplifies to v then it is not the case that strictly_simpler(u, v). i.e. simplifying should not *increase* the complexity even though it is not required to decrease it. Enforcing this invariant lead to finding some bugs where simplifying of integers, floats and sets was suboptimal. 
* Integers in basic data are now required to fit into 64 bits. As a result python integer types are now serialized as strings, and some types have stopped using quite so needlessly large random seeds. Hypothesis Stateful testing was then turned upon Hypothesis itself, which lead to an amazing number of minor bugs being found in Hypothesis itself. Bugs fixed (most but not all from the result of stateful testing) include: * Serialization of streaming examples was flaky in a way that you would probably never notice: If you generate a template, simplify it, serialize it, deserialize it, serialize it again and then deserialize it you would get the original stream instead of the simplified one. * If you reduced max_examples below the number of examples already saved in the database, you would have got a ValueError. Additionally, if you had more than max_examples in the database all of them would have been considered. * @given will no longer count duplicate examples (which it never called your function with) towards max_examples. This may result in your tests running slower, but that's probably just because they're trying more examples. * General improvements to example search which should result in better performance and higher quality examples. In particular parameters which have a history of producing useless results will be more aggressively culled. This is useful both because it decreases the chance of useless examples and also because it's much faster to not check parameters which we were unlikely to ever pick! * integers_from and lists of types with only one value (e.g. [None]) would previously have had a very high duplication rate so you were probably only getting a handful of examples. They now have a much lower duplication rate, as well as the improvements to search making this less of a problem in the first place. * You would sometimes see simplification taking significantly longer than your defined timeout. 
This would happen because timeout was only being checked after each *successful* simplification, so if Hypothesis was spending a lot of time unsuccessfully simplifying things it wouldn't stop in time. The timeout is now applied for unsuccessful simplifications too. * In Python 2.7, integers_from strategies would have failed during simplification with an OverflowError if their starting point was at or near to the maximum size of a 64-bit integer. * flatmap and map would have failed if called with a function without a __name__ attribute. * If max_examples was less than min_satisfying_examples this would always error. Now min_satisfying_examples is capped to max_examples. Note that if you have assumptions to satisfy here this will still cause an error. Some minor quality improvements: * Lists of streams, flatmapped strategies and basic strategies should now now have slightly better simplification. .. _v1.3.0: ------------------ 1.3.0 - 2015-05-22 ------------------ New features: * New verbosity level API for printing intermediate results and exceptions. * New specifier for strings generated from a specified alphabet. * Better error messages for tests that are failing because of a lack of enough examples. Bug fixes: * Fix error where use of ForkingTestCase would sometimes result in too many open files. * Fix error where saving a failing example that used flatmap could error. * Implement simplification for sampled_from, which apparently never supported it previously. Oops. General improvements: * Better range of examples when using one_of or sampled_from. * Fix some pathological performance issues when simplifying lists of complex values. * Fix some pathological performance issues when simplifying examples that require unicode strings with high codepoints. * Random will now simplify to more readable examples. .. _v1.2.1: ------------------ 1.2.1 - 2015-04-16 ------------------ A small patch release for a bug in the new executors feature. 
Tests which require doing something to their result in order to fail would have instead reported as flaky. .. _v1.2.0: ------------------ 1.2.0 - 2015-04-15 ------------------ Codename: Finders keepers. A bunch of new features and improvements. * Provide a mechanism for customizing how your tests are executed. * Provide a test runner that forks before running each example. This allows better support for testing native code which might trigger a segfault or a C level assertion failure. * Support for using Hypothesis to find examples directly rather than as just as a test runner. * New streaming type which lets you generate infinite lazily loaded streams of data - perfect for if you need a number of examples but don't know how many. * Better support for large integer ranges. You can now use integers_in_range with ranges of basically any size. Previously large ranges would have eaten up all your memory and taken forever. * Integers produce a wider range of data than before - previously they would only rarely produce integers which didn't fit into a machine word. Now it's much more common. This percolates to other numeric types which build on integers. * Better validation of arguments to @given. Some situations that would previously have caused silently wrong behaviour will now raise an error. * Include +/- sys.float_info.max in the set of floating point edge cases that Hypothesis specifically tries. * Fix some bugs in floating point ranges which happen when given +/- sys.float_info.max as one of the endpoints... (really any two floats that are sufficiently far apart so that x, y are finite but y - x is infinite). This would have resulted in generating infinite values instead of ones inside the range. .. 
_v1.1.1: ------------------ 1.1.1 - 2015-04-07 ------------------ Codename: Nothing to see here This is just a patch release put out because it fixed some internal bugs that would block the Django integration release but did not actually affect anything anyone could previously have been using. It also contained a minor quality fix for floats that I'd happened to have finished in time. * Fix some internal bugs with object lifecycle management that were impossible to hit with the previously released versions but broke hypothesis-django. * Bias floating point numbers somewhat less aggressively towards very small numbers .. _v1.1.0: ------------------ 1.1.0 - 2015-04-06 ------------------ Codename: No-one mention the M word. * Unicode strings are more strongly biased towards ascii characters. Previously they would generate all over the space. This is mostly so that people who try to shape their unicode strings with assume() have less of a bad time. * A number of fixes to data deserialization code that could theoretically have caused mysterious bugs when using an old version of a Hypothesis example database with a newer version. To the best of my knowledge a change that could have triggered this bug has never actually been seen in the wild. Certainly no-one ever reported a bug of this nature. * Out of the box support for Decimal and Fraction. * new dictionary specifier for dictionaries with variable keys. * Significantly faster and higher quality simplification, especially for collections of data. * New filter() and flatmap() methods on Strategy for better ways of building strategies out of other strategies. * New BasicStrategy class which allows you to define your own strategies from scratch without needing an existing matching strategy or being exposed to the full horror or non-public nature of the SearchStrategy interface. .. _v1.0.0: ------------------ 1.0.0 - 2015-03-27 ------------------ Codename: Blast-off! There are no code changes in this release. 
This is precisely the 0.9.2 release with some updated documentation. .. _v0.9.2: ------------------ 0.9.2 - 2015-03-26 ------------------ Codename: T-1 days. * floats_in_range would not actually have produced floats_in_range unless that range happened to be (0, 1). Fix this. .. _v0.9.1: ------------------ 0.9.1 - 2015-03-25 ------------------ Codename: T-2 days. * Fix a bug where if you defined a strategy using map on a lambda then the results would not be saved in the database. * Significant performance improvements when simplifying examples using lists, strings or bounded integer ranges. .. _v0.9.0: ------------------ 0.9.0 - 2015-03-23 ------------------ Codename: The final countdown This release could also be called 1.0-RC1. It contains a teeny tiny bugfix, but the real point of this release is to declare feature freeze. There will be zero functionality changes between 0.9.0 and 1.0 unless something goes really really wrong. No new features will be added, no breaking API changes will occur, etc. This is the final shakedown before I declare Hypothesis stable and ready to use and throw a party to celebrate. Bug bounty for any bugs found between now and 1.0: I will buy you a drink (alcoholic, caffeinated, or otherwise) and shake your hand should we ever find ourselves in the same city at the same time. The one tiny bugfix: * Under pypy, databases would fail to close correctly when garbage collected, leading to a memory leak and a confusing error message if you were repeatedly creating databases and not closing them. It is very unlikely you were doing this and the chances of you ever having noticed this bug are very low. .. 
_v0.7.2: ------------------ 0.7.2 - 2015-03-22 ------------------ Codename: Hygienic macros or bust * You can now name an argument to @given 'f' and it won't break (:issue:`38`) * strategy_test_suite is now named strategy_test_suite as the documentation claims and not in fact strategy_test_suitee * Settings objects can now be used as a context manager to temporarily override the default values inside their context. .. _v0.7.1: ------------------ 0.7.1 - 2015-03-21 ------------------ Codename: Point releases go faster * Better string generation by parametrizing by a limited alphabet * Faster string simplification - previously if simplifying a string with high range unicode characters it would try every unicode character smaller than that. This was pretty pointless. Now it stops after it's a short range (it can still reach smaller ones through recursive calls because of other simplifying operations). * Faster list simplification by first trying a binary chop down the middle * Simultaneous simplification of identical elements in a list. So if a bug only triggers when you have duplicates but you drew e.g. [-17, -17], this will now simplify to [0, 0]. .. _v0.7.0,: ------------------- 0.7.0, - 2015-03-20 ------------------- Codename: Starting to look suspiciously real This is probably the last minor release prior to 1.0. It consists of stability improvements, a few usability things designed to make Hypothesis easier to try out, and filing off some final rough edges from the API. * Significant speed and memory usage improvements * Add an example() method to strategy objects to give an example of the sort of data that the strategy generates. * Remove .descriptor attribute of strategies * Rename descriptor_test_suite to strategy_test_suite * Rename the few remaining uses of descriptor to specifier (descriptor already has a defined meaning in Python) .. 
_v0.6.0: --------------------------------------------------------- 0.6.0 - 2015-03-13 --------------------------------------------------------- Codename: I'm sorry, were you using that API? This is primarily a "simplify all the weird bits of the API" release. As a result there are a lot of breaking changes. If you just use @given with core types then you're probably fine. In particular: * Stateful testing has been removed from the API * The way the database is used has been rendered less useful (sorry). The feature for reassembling values saved from other tests doesn't currently work. This will probably be brought back in post 1.0. * SpecificationMapper is no longer a thing. Instead there is an ExtMethod called strategy which you extend to specify how to convert other types to strategies. * Settings are now extensible so you can add your own for configuring a strategy * MappedSearchStrategy no longer needs an unpack method * Basically all the SearchStrategy internals have changed massively. If you implemented SearchStrategy directly rather than using MappedSearchStrategy talk to me about fixing it. * Change to the way extra packages work. You now specify the package. This must have a load() method. Additionally any modules in the package will be loaded in under hypothesis.extra Bug fixes: * Fix for a bug where calling falsify on a lambda with a non-ascii character in its body would error. Hypothesis Extra: hypothesis-fakefactory\: An extension for using faker data in hypothesis. Depends on fake-factory. .. _v0.5.0: ------------------ 0.5.0 - 2015-02-10 ------------------ Codename: Read all about it. Core hypothesis: * Add support back in for pypy and python 3.2 * @given functions can now be invoked with some arguments explicitly provided. If all arguments that hypothesis would have provided are passed in then no falsification is run. 
* Related to the above, this means that you can now use pytest fixtures and mark.parametrize with Hypothesis without either interfering with the other. * Breaking change: @given no longer works for functions with varargs (varkwargs are fine). This might be added back in at a later date. * Windows is now fully supported. A limited version (just the tests with none of the extras) of the test suite is run on windows with each commit so it is now a first class citizen of the Hypothesis world. * Fix a bug for fuzzy equality of equal complex numbers with different reprs (this can happen when one coordinate is zero). This shouldn't affect users - that feature isn't used anywhere public facing. * Fix generation of floats on windows and 32-bit builds of python. I was using some struct.pack logic that only worked on certain word sizes. * When a test times out and hasn't produced enough examples this now raises a Timeout subclass of Unfalsifiable. * Small search spaces are better supported. Previously something like a @given(bool, bool) would have failed because it couldn't find enough examples. Hypothesis is now aware of the fact that these are small search spaces and will not error in this case. * Improvements to parameter search in the case of hard to satisfy assume. Hypothesis will now spend less time exploring parameters that are unlikely to provide anything useful. * Increase chance of generating "nasty" floats * Fix a bug that would have caused unicode warnings if you had a sampled_from that was mixing unicode and byte strings. * Added a standard test suite that you can use to validate a custom strategy you've defined is working correctly. Hypothesis extra: First off, introducing Hypothesis extra packages! These are packages that are separated out from core Hypothesis because they have one or more dependencies. Every hypothesis-extra package is pinned to a specific point release of Hypothesis and will have some version requirements on its dependency. 
They use entry_points so you will usually not need to explicitly import them, just have them installed on the path. This release introduces two of them: hypothesis-datetime: Does what it says on the tin: Generates datetimes for Hypothesis. Just install the package and datetime support will start working. Depends on pytz for timezone support hypothesis-pytest: A very rudimentary pytest plugin. All it does right now is hook the display of falsifying examples into pytest reporting. Depends on pytest. .. _v0.4.3: ------------------ 0.4.3 - 2015-02-05 ------------------ Codename: TIL narrow Python builds are a thing This just fixes the one bug. * Apparently there is such a thing as a "narrow python build" and OS X ships with these by default for python 2.7. These are builds where you only have two bytes worth of unicode. As a result, generating unicode was completely broken on OS X. Fix this by only generating unicode codepoints in the range supported by the system. .. _v0.4.2: ------------------ 0.4.2 - 2015-02-04 ------------------ Codename: O(dear) This is purely a bugfix release: * Provide sensible external hashing for all core types. This will significantly improve performance of tracking seen examples which happens in literally every falsification run. For Hypothesis fixing this cut 40% off the runtime of the test suite. The behaviour is quadratic in the number of examples so if you're running the default configuration this will be less extreme (Hypothesis's test suite runs at a higher number of examples than default), but you should still see a significant improvement. * Fix a bug in formatting of complex numbers where the string could get incorrectly truncated. .. _v0.4.1: ------------------ 0.4.1 - 2015-02-03 ------------------ Codename: Cruel and unusual edge cases This release is mostly about better test case generation. 
Enhancements: * Has a cool release name * text_type (str in python 3, unicode in python 2) example generation now actually produces interesting unicode instead of boring ascii strings. * floating point numbers are generated over a much wider range, with particular attention paid to generating nasty numbers - nan, infinity, large and small values, etc. * examples can be generated using pieces of examples previously saved in the database. This allows interesting behaviour that has previously been discovered to be propagated to other examples. * improved parameter exploration algorithm which should allow it to more reliably hit interesting edge cases. * Timeout can now be disabled entirely by setting it to any value <= 0. Bug fixes: * The descriptor on a OneOfStrategy could be wrong if you had descriptors which were equal but should not be coalesced. e.g. a strategy for one_of((frozenset({int}), {int})) would have reported its descriptor as {int}. This is unlikely to have caused you any problems * If you had strategies that could produce NaN (which float previously couldn't but e.g. a Just(float('nan')) could) then this would have sent hypothesis into an infinite loop that would have only been terminated when it hit the timeout. * Given elements that can take a long time to minimize, minimization of floats or tuples could be quadratic or worse in the that value. You should now see much better performance for simplification, albeit at some cost in quality. Other: * A lot of internals have been been rewritten. This shouldn't affect you at all, but it opens the way for certain of hypothesis's oddities to be a lot more extensible by users. Whether this is a good thing may be up for debate... .. _v0.4.0: ------------------ 0.4.0 - 2015-01-21 ------------------ FLAGSHIP FEATURE: Hypothesis now persists examples for later use. It stores data in a local SQLite database and will reuse it for all tests of the same type. 
LICENSING CHANGE: Hypothesis is now released under the Mozilla Public License 2.0. This applies to all versions from 0.4.0 onwards until further notice. The previous license remains applicable to all code prior to 0.4.0. Enhancements: * Printing of failing examples. I was finding that the pytest runner was not doing a good job of displaying these, and that Hypothesis itself could do much better. * Drop dependency on six for cross-version compatibility. It was easy enough to write the shim for the small set of features that we care about and this lets us avoid a moderately complex dependency. * Some improvements to statistical distribution of selecting from small (<= 3 elements) * Improvements to parameter selection for finding examples. Bugs fixed: * could_have_produced for lists, dicts and other collections would not have examined the elements and thus when using a union of different types of list this could result in Hypothesis getting confused and passing a value to the wrong strategy. This could potentially result in exceptions being thrown from within simplification. * sampled_from would not work correctly on a single element list. * Hypothesis could get *very* confused by values which are equal despite having different types being used in descriptors. Hypothesis now has its own more specific version of equality it uses for descriptors and tracking. It is always more fine grained than Python equality: Things considered != are not considered equal by hypothesis, but some things that are considered == are distinguished. If your test suite uses both frozenset and set tests this bug is probably affecting you. .. _v0.3.2: ------------------ 0.3.2 - 2015-01-16 ------------------ * Fix a bug where if you specified floats_in_range with integer arguments Hypothesis would error in example simplification. * Improve the statistical distribution of the floats you get for the floats_in_range strategy. 
I'm not sure whether this will affect users in practice but it took my tests for various conditions from flaky to rock solid so it at the very least improves discovery of the artificial cases I'm looking for. * Improved repr() for strategies and RandomWithSeed instances. * Add detection for flaky test cases where hypothesis managed to find an example which breaks it but on the final invocation of the test it does not raise an error. This will typically happen with too much recursion errors but could conceivably happen in other circumstances too. * Provide a "derandomized" mode. This allows you to run hypothesis with zero real randomization, making your build nice and deterministic. The tests run with a seed calculated from the function they're testing so you should still get a good distribution of test cases. * Add a mechanism for more conveniently defining tests which just sample from some collection. * Fix for a really subtle bug deep in the internals of the strategy table. In some circumstances if you were to define instance strategies for both a parent class and one or more of its subclasses you would under some circumstances get the strategy for the wrong superclass of an instance. It is very unlikely anyone has ever encountered this in the wild, but it is conceivably possible given that a mix of namedtuple and tuple are used fairly extensively inside hypothesis which do exhibit this pattern of strategy. .. _v0.3.1: ------------------ 0.3.1 - 2015-01-13 ------------------ * Support for generation of frozenset and Random values * Correct handling of the case where a called function mutates it argument. This involved introducing a notion of a strategies knowing how to copy their argument. The default method should be entirely acceptable and the worst case is that it will continue to have the old behaviour if you don't mark your strategy as mutable, so this shouldn't break anything. 
* Fix for a bug where some strategies did not correctly implement could_have_produced. It is very unlikely that any of these would have been seen in the wild, and the consequences if they had been would have been minor. * Re-export the @given decorator from the main hypothesis namespace. It's still available at the old location too. * Minor performance optimisation for simplifying long lists. .. _v0.3.0: ------------------ 0.3.0 - 2015-01-12 ------------------ * Complete redesign of the data generation system. Extreme breaking change for anyone who was previously writing their own SearchStrategy implementations. These will not work any more and you'll need to modify them. * New settings system allowing more global and modular control of Verifier behaviour. * Decouple SearchStrategy from the StrategyTable. This leads to much more composable code which is a lot easier to understand. * A significant amount of internal API renaming and moving. This may also break your code. * Expanded available descriptors, allowing for generating integers or floats in a specific range. * Significantly more robust. A very large number of small bug fixes, none of which anyone is likely to have ever noticed. * Deprecation of support for pypy and python 3 prior to 3.3. 3.3 and 3.4. Supported versions are 2.7.x, 3.3.x, 3.4.x. I expect all of these to remain officially supported for a very long time. I would not be surprised to add pypy support back in later but I'm not going to do so until I know someone cares about it. In the meantime it will probably still work. .. _v0.2.2: ------------------ 0.2.2 - 2015-01-08 ------------------ * Fix an embarrassing complete failure of the installer caused by my being bad at version control .. _v0.2.1: ------------------ 0.2.1 - 2015-01-07 ------------------ * Fix a bug in the new stateful testing feature where you could make __init__ a @requires method. Simplification would not always work if the prune method was able to successfully shrink the test. 
.. _v0.2.0: ------------------ 0.2.0 - 2015-01-07 ------------------ * It's aliiive. * Improve python 3 support using six. * Distinguish between byte and unicode types. * Fix issues where FloatStrategy could raise. * Allow stateful testing to request constructor args. * Fix for issue where test annotations would timeout based on when the module was loaded instead of when the test started .. _v0.1.4: ------------------ 0.1.4 - 2013-12-14 ------------------ * Make verification runs time bounded with a configurable timeout .. _v0.1.3: ------------------ 0.1.3 - 2013-05-03 ------------------ * Bugfix: Stateful testing behaved incorrectly with subclassing. * Complex number support * support for recursive strategies * different error for hypotheses with unsatisfiable assumptions .. _v0.1.2: ------------------ 0.1.2 - 2013-03-24 ------------------ * Bugfix: Stateful testing was not minimizing correctly and could throw exceptions. * Better support for recursive strategies. * Support for named tuples. * Much faster integer generation. .. _v0.1.1: ------------------ 0.1.1 - 2013-03-24 ------------------ * Python 3.x support via 2to3. * Use new style classes (oops). .. _v0.1.0: ------------------ 0.1.0 - 2013-03-23 ------------------ * Introduce stateful testing. * Massive rewrite of internals to add flags and strategies. .. _v0.0.5: ------------------ 0.0.5 - 2013-03-13 ------------------ * No changes except trying to fix packaging .. _v0.0.4: ------------------ 0.0.4 - 2013-03-13 ------------------ * No changes except that I checked in a failing test case for 0.0.3 so had to replace the release. Doh .. _v0.0.3: ------------------ 0.0.3 - 2013-03-13 ------------------ * Improved a few internals. * Opened up creating generators from instances as a general API. * Test integration. .. _v0.0.2: ------------------ 0.0.2 - 2013-03-12 ------------------ * Starting to tighten up on the internals. * Change API to allow more flexibility in configuration. * More testing. .. 
_v0.0.1: ------------------ 0.0.1 - 2013-03-10 ------------------ * Initial release. * Basic working prototype. Demonstrates idea, probably shouldn't be used. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/community.rst000066400000000000000000000037151354103617500267600ustar00rootroot00000000000000========= Community ========= The Hypothesis community is small for the moment but is full of excellent people who can answer your questions and help you out. Please do join us. The two major places for community discussion are: * `The mailing list `_. * An IRC channel, #hypothesis on freenode, which is more active than the mailing list. Feel free to use these to ask for help, provide feedback, or discuss anything remotely Hypothesis related at all. If you post a question on StackOverflow, please use the `python-hypothesis `__ tag! Please note that `the Hypothesis code of conduct `_ applies in all Hypothesis community spaces. If you would like to cite Hypothesis, please consider `our suggested citation `_. If you like repo badges, we suggest the following badge, which you can add with reStructuredText or Markdown, respectively: .. image:: https://img.shields.io/badge/hypothesis-tested-brightgreen.svg .. code:: restructuredtext .. image:: https://img.shields.io/badge/hypothesis-tested-brightgreen.svg :alt: Tested with Hypothesis :target: https://hypothesis.readthedocs.io .. code:: md [![Tested with Hypothesis](https://img.shields.io/badge/hypothesis-tested-brightgreen.svg)](https://hypothesis.readthedocs.io/) Finally, we have a beautiful logo which appears online, and often on stickers: .. image:: ../../brand/dragonfly-rainbow.svg :alt: The Hypothesis logo, a dragonfly with rainbow wings :align: center :width: 50 % As well as being beautiful, dragonflies actively hunt down bugs for a living! You can find the images and a usage guide in the :gh-file:`brand` directory on GitHub, or find us at conferences where we often have stickers and sometimes other swag. 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/conf.py000066400000000000000000000070161354103617500254770ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import datetime import os import sys sys.path.append(os.path.join(os.path.dirname(__file__), "..", "src")) autodoc_member_order = "bysource" extensions = [ "sphinx.ext.autodoc", "sphinx.ext.extlinks", "sphinx.ext.viewcode", "sphinx.ext.intersphinx", ] templates_path = ["_templates"] source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = u"Hypothesis" copyright = u"2013-%s, David R. MacIver" % datetime.datetime.utcnow().year author = u"David R. 
MacIver" _d = {} with open( os.path.join(os.path.dirname(__file__), "..", "src", "hypothesis", "version.py") ) as f: exec(f.read(), _d) version = _d["__version__"] release = _d["__version__"] language = None exclude_patterns = ["_build"] pygments_style = "sphinx" todo_include_todos = False intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), "numpy": ("https://docs.scipy.org/doc/numpy/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), "pytest": ("https://docs.pytest.org/en/stable/", None), "django": ("https://django.readthedocs.io/en/stable/", None), "attrs": ("https://www.attrs.org/en/stable/", None), } autodoc_mock_imports = ["pandas"] # This config value must be a dictionary of external sites, mapping unique # short alias names to a base URL and a prefix. # See http://sphinx-doc.org/ext/extlinks.html _repo = "https://github.com/HypothesisWorks/hypothesis/" extlinks = { "commit": (_repo + "commit/%s", "commit "), "gh-file": (_repo + "blob/master/%s", ""), "gh-link": (_repo + "%s", ""), "issue": (_repo + "issues/%s", "issue #"), "pull": (_repo + "pull/%s", "pull request #"), "pypi": ("https://pypi.org/project/%s", ""), "bpo": ("https://bugs.python.org/issue%s", "bpo-"), } # -- Options for HTML output ---------------------------------------------- if os.environ.get("READTHEDOCS", None) != "True": # only import and set the theme if we're building docs locally import sphinx_rtd_theme html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] html_static_path = ["_static"] htmlhelp_basename = "Hypothesisdoc" html_favicon = "../../brand/favicon.ico" # -- Options for LaTeX output --------------------------------------------- latex_elements = {} latex_documents = [ ( master_doc, "Hypothesis.tex", u"Hypothesis Documentation", u"David R. 
MacIver", "manual", ) ] man_pages = [(master_doc, "hypothesis", u"Hypothesis Documentation", [author], 1)] texinfo_documents = [ ( master_doc, "Hypothesis", u"Hypothesis Documentation", author, "Hypothesis", "Advanced property-based testing for Python.", "Miscellaneous", ) ] hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/data.rst000066400000000000000000000302711354103617500256420ustar00rootroot00000000000000============================= What you can generate and how ============================= *Most things should be easy to generate and everything should be possible.* To support this principle Hypothesis provides strategies for most built-in types with arguments to constrain or adjust the output, as well as higher-order strategies that can be composed to generate more complex types. This document is a guide to what strategies are available for generating data and how to build them. Strategies have a variety of other important internal features, such as how they simplify, but the data they can generate is the only public part of their API. ~~~~~~~~~~~~~~~ Core Strategies ~~~~~~~~~~~~~~~ Functions for building strategies are all available in the hypothesis.strategies module. The salient functions from it are as follows: .. automodule:: hypothesis.strategies :members: :exclude-members: SearchStrategy ~~~~~~~~~~~~~~~~~~~~~~ Provisional Strategies ~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: hypothesis.provisional :members: .. _shrinking: ~~~~~~~~~ Shrinking ~~~~~~~~~ When using strategies it is worth thinking about how the data *shrinks*. Shrinking is the process by which Hypothesis tries to produce human readable examples when it finds a failure - it takes a complex example and turns it into a simpler one. Each strategy defines an order in which it shrinks - you won't usually need to care about this much, but it can be worth being aware of as it can affect what the best way to write your own strategies is. 
The exact shrinking behaviour is not a guaranteed part of the API, but it doesn't change that often and when it does it's usually because we think the new way produces nicer examples. Possibly the most important one to be aware of is :func:`~hypothesis.strategies.one_of`, which has a preference for values produced by strategies earlier in its argument list. Most of the others should largely "do the right thing" without you having to think about it. ~~~~~~~~~~~~~~~~~~~ Adapting strategies ~~~~~~~~~~~~~~~~~~~ Often it is the case that a strategy doesn't produce exactly what you want it to and you need to adapt it. Sometimes you can do this in the test, but this hurts reuse because you then have to repeat the adaption in every test. Hypothesis gives you ways to build strategies from other strategies given functions for transforming the data. ------- Mapping ------- ``map`` is probably the easiest and most useful of these to use. If you have a strategy ``s`` and a function ``f``, then an example ``s.map(f).example()`` is ``f(s.example())``, i.e. we draw an example from ``s`` and then apply ``f`` to it. e.g.: .. code-block:: pycon >>> lists(integers()).map(sorted).example() [-25527, -24245, -23118, -93, -70, -7, 0, 39, 40, 65, 88, 112, 6189, 9480, 19469, 27256, 32526, 1566924430] Note that many things that you might use mapping for can also be done with :func:`~hypothesis.strategies.builds`. .. _filtering: --------- Filtering --------- ``filter`` lets you reject some examples. ``s.filter(f).example()`` is some example of ``s`` such that ``f(example)`` is truthy. .. code-block:: pycon >>> integers().filter(lambda x: x > 11).example() 26126 >>> integers().filter(lambda x: x > 11).example() 23324 It's important to note that ``filter`` isn't magic and if your condition is too hard to satisfy then this can fail: .. code-block:: pycon >>> integers().filter(lambda x: False).example() Traceback (most recent call last): ... 
hypothesis.errors.Unsatisfiable: Could not find any valid examples in 20 tries In general you should try to use ``filter`` only to avoid corner cases that you don't want rather than attempting to cut out a large chunk of the search space. A technique that often works well here is to use map to first transform the data and then use ``filter`` to remove things that didn't work out. So for example if you wanted pairs of integers (x,y) such that x < y you could do the following: .. code-block:: pycon >>> tuples(integers(), integers()).map(sorted).filter(lambda x: x[0] < x[1]).example() [-8543729478746591815, 3760495307320535691] .. _flatmap: ---------------------------- Chaining strategies together ---------------------------- Finally there is ``flatmap``. ``flatmap`` draws an example, then turns that example into a strategy, then draws an example from *that* strategy. It may not be obvious why you want this at first, but it turns out to be quite useful because it lets you generate different types of data with relationships to each other. For example suppose we wanted to generate a list of lists of the same length: .. code-block:: pycon >>> rectangle_lists = integers(min_value=0, max_value=10).flatmap( ... lambda n: lists(lists(integers(), min_size=n, max_size=n))) >>> rectangle_lists.example() [] >>> rectangle_lists.filter(lambda x: len(x) >= 10).example() [[], [], [], [], [], [], [], [], [], []] >>> rectangle_lists.filter(lambda t: len(t) >= 3 and len(t[0]) >= 3).example() [[0, 0, 0], [0, 0, 0], [0, 0, 0]] >>> rectangle_lists.filter(lambda t: sum(len(s) for s in t) >= 10).example() [[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]] In this example we first choose a length for our tuples, then we build a strategy which generates lists containing lists precisely of that length. The finds show what simple examples for this look like. 
Most of the time you probably don't want ``flatmap``, but unlike ``filter`` and ``map`` which are just conveniences for things you could just do in your tests, ``flatmap`` allows genuinely new data generation that you wouldn't otherwise be able to easily do. (If you know Haskell: Yes, this is more or less a monadic bind. If you don't know Haskell, ignore everything in these parentheses. You do not need to understand anything about monads to use this, or anything else in Hypothesis). -------------- Recursive data -------------- Sometimes the data you want to generate has a recursive definition. e.g. if you wanted to generate JSON data, valid JSON is: 1. Any float, any boolean, any unicode string. 2. Any list of valid JSON data 3. Any dictionary mapping unicode strings to valid JSON data. The problem is that you cannot call a strategy recursively and expect it to not just blow up and eat all your memory. The other problem here is that not all unicode strings display consistently on different machines, so we'll restrict them in our doctest. The way Hypothesis handles this is with the :func:`~hypothesis.strategies.recursive` strategy which you pass in a base case and a function that, given a strategy for your data type, returns a new strategy for it. So for example: .. code-block:: pycon >>> from string import printable; from pprint import pprint >>> json = recursive(none() | booleans() | floats() | text(printable), ... lambda children: lists(children, 1) | dictionaries(text(printable), children, min_size=1)) >>> pprint(json.example()) [[1.175494351e-38, ']', 1.9, True, False, '.M}Xl', ''], True] >>> pprint(json.example()) {'de(l': None, 'nK': {'(Rt)': None, '+hoZh1YU]gy8': True, '8z]EIFA06^l`i^': 'LFE{Q', '9,': 'l{cA=/'}} That is, we start with our leaf data and then we augment it by allowing lists and dictionaries of anything we can generate as JSON data. The size control of this works by limiting the maximum number of values that can be drawn from the base strategy. 
So for example if we wanted to only generate really small JSON we could do this as: .. code-block:: pycon >>> small_lists = recursive(booleans(), lists, max_leaves=5) >>> small_lists.example() True >>> small_lists.example() [False] .. _composite-strategies: ~~~~~~~~~~~~~~~~~~~~ Composite strategies ~~~~~~~~~~~~~~~~~~~~ The :func:`@composite ` decorator lets you combine other strategies in more or less arbitrary ways. It's probably the main thing you'll want to use for complicated custom strategies. The composite decorator works by converting a function that returns one example into a function that returns a strategy that produces such examples - which you can pass to :func:`@given `, modify with ``.map`` or ``.filter``, and generally use like any other strategy. It does this by giving you a special function ``draw`` as the first argument, which can be used just like the corresponding method of the :func:`~hypothesis.strategies.data` strategy within a test. In fact, the implementation is almost the same - but defining a strategy with :func:`@composite ` makes code reuse easier, and usually improves the display of failing examples. For example, the following gives you a list and an index into it: .. code-block:: pycon >>> @composite ... def list_and_index(draw, elements=integers()): ... xs = draw(lists(elements, min_size=1)) ... i = draw(integers(min_value=0, max_value=len(xs) - 1)) ... return (xs, i) ``draw(s)`` is a function that should be thought of as returning ``s.example()``, except that the result is reproducible and will minimize correctly. The decorated function has the initial argument removed from the list, but will accept all the others in the expected order. Defaults are preserved. .. 
code-block:: pycon >>> list_and_index() list_and_index() >>> list_and_index().example() ([15949, -35, 21764, 8167, 1607867656, -41, 104, 19, -90, 520116744169390387, 7107438879249457973], 0) >>> list_and_index(booleans()) list_and_index(elements=booleans()) >>> list_and_index(booleans()).example() ([True, False], 0) Note that the repr will work exactly like it does for all the built-in strategies: it will be a function that you can call to get the strategy in question, with values provided only if they do not match the defaults. You can use :func:`assume ` inside composite functions: .. code-block:: python @composite def distinct_strings_with_common_characters(draw): x = draw(text(min_size=1)) y = draw(text(alphabet=x)) assume(x != y) return (x, y) This works as :func:`assume ` normally would, filtering out any examples for which the passed in argument is falsey. .. _interactive-draw: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Drawing interactively in tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is also the :func:`~hypothesis.strategies.data` strategy, which gives you a means of using strategies interactively. Rather than having to specify everything up front in :func:`@given ` you can draw from strategies in the body of your test. This is similar to :func:`@composite `, but even more powerful as it allows you to mix test code with example generation. The downside of this power is that :func:`~hypothesis.strategies.data` is incompatible with explicit :func:`@example(...) `\ s - and the mixed code is often harder to debug when something goes wrong. If you need values that are affected by previous draws but which *don't* depend on the execution of your test, stick to the simpler :func:`@composite `. .. code-block:: python @given(data()) def test_draw_sequentially(data): x = data.draw(integers()) y = data.draw(integers(min_value=x)) assert x < y If the test fails, each draw will be printed with the falsifying example. e.g. 
the above is wrong (it has a boundary condition error), so will print: .. code-block:: pycon Falsifying example: test_draw_sequentially(data=data(...)) Draw 1: 0 Draw 2: 0 As you can see, data drawn this way is simplified as usual. Optionally, you can provide a label to identify values generated by each call to ``data.draw()``. These labels can be used to identify values in the output of a falsifying example. For instance: .. code-block:: python @given(data()) def test_draw_sequentially(data): x = data.draw(integers(), label='First number') y = data.draw(integers(min_value=x), label='Second number') assert x < y will produce the output: .. code-block:: pycon Falsifying example: test_draw_sequentially(data=data(...)) Draw 1 (First number): 0 Draw 2 (Second number): 0 hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/database.rst000066400000000000000000000061771354103617500265050ustar00rootroot00000000000000=============================== The Hypothesis Example Database =============================== When Hypothesis finds a bug it stores enough information in its database to reproduce it. This enables you to have a classic testing workflow of find a bug, fix a bug, and be confident that this is actually doing the right thing because Hypothesis will start by retrying the examples that broke things last time. ----------- Limitations ----------- The database is best thought of as a cache that you never need to invalidate: Information may be lost when you upgrade a Hypothesis version or change your test, so you shouldn't rely on it for correctness - if there's an example you want to ensure occurs each time then :ref:`there's a feature for including them in your source code ` - but it helps the development workflow considerably by making sure that the examples you've just found are reproduced. The database also records examples that exercise less-used parts of your code, so the database may update even when no failing examples were found. 
-------------- File locations -------------- The default storage format is as a fairly opaque directory structure. Each test corresponds to a directory, and each example to a file within that directory. The standard location for it is ``.hypothesis/examples`` in your current working directory. You can override this by setting the :obj:`~hypothesis.settings.database` setting. If you have not configured a database and the default location is unusable (e.g. because you do not have read/write permission), Hypothesis will issue a warning and then fall back to an in-memory database. -------------------------------------------- Upgrading Hypothesis and changing your tests -------------------------------------------- The design of the Hypothesis database is such that you can put arbitrary data in the database and not get wrong behaviour. When you upgrade Hypothesis, old data *might* be invalidated, but this should happen transparently. It can never be the case that e.g. changing the strategy that generates an argument gives you data from the old strategy. ----------------------------- Sharing your example database ----------------------------- .. note:: If specific examples are important for correctness you should use the :func:`@example ` decorator, as the example database may discard entries due to changes in your code or dependencies. For most users, we therefore recommend using the example database locally and possibly persisting it between CI builds, but not tracking it under version control. The examples database can be shared simply by checking the directory into version control, for example with the following ``.gitignore``:: # Ignore files cached by Hypothesis... .hypothesis/* # except for the examples directory !.hypothesis/examples/ Like everything under ``.hypothesis/``, the examples directory will be transparently created on demand. 
Unlike the other subdirectories, ``examples/`` is designed to handle merges, deletes, etc if you just add the directory into git, mercurial, or any similar version control system. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/details.rst000066400000000000000000000515551354103617500263660ustar00rootroot00000000000000============================= Details and advanced features ============================= This is an account of slightly less common Hypothesis features that you don't need to get started but will nevertheless make your life easier. ---------------------- Additional test output ---------------------- Normally the output of a failing test will look something like: .. code:: Falsifying example: test_a_thing(x=1, y="foo") With the ``repr`` of each keyword argument being printed. Sometimes this isn't enough, either because you have values with a ``repr`` that isn't very descriptive or because you need to see the output of some intermediate steps of your test. That's where the ``note`` function comes in: .. autofunction:: hypothesis.note .. code-block:: pycon >>> from hypothesis import given, note, strategies as st >>> @given(st.lists(st.integers()), st.randoms()) ... def test_shuffle_is_noop(ls, r): ... ls2 = list(ls) ... r.shuffle(ls2) ... note("Shuffle: %r" % (ls2)) ... assert ls == ls2 ... >>> try: ... test_shuffle_is_noop() ... except AssertionError: ... print('ls != ls2') Falsifying example: test_shuffle_is_noop(ls=[0, 1], r=RandomWithSeed(1)) Shuffle: [1, 0] ls != ls2 The note is printed in the final run of the test in order to include any additional information you might need in your test. .. _statistics: --------------- Test Statistics --------------- If you are using :pypi:`pytest` you can see a number of statistics about the executed tests by passing the command line argument ``--hypothesis-show-statistics``. 
This will include some general statistics about the test: For example if you ran the following with ``--hypothesis-show-statistics``: .. code-block:: python from hypothesis import given, strategies as st @given(st.integers()) def test_integers(i): pass You would see: .. code-block:: none test_integers: - 100 passing examples, 0 failing examples, 0 invalid examples - Typical runtimes: ~ 1ms - Fraction of time spent in data generation: ~ 12% - Stopped because settings.max_examples=100 The final "Stopped because" line is particularly important to note: It tells you the setting value that determined when the test should stop trying new examples. This can be useful for understanding the behaviour of your tests. Ideally you'd always want this to be :obj:`~hypothesis.settings.max_examples`. In some cases (such as filtered and recursive strategies) you will see events mentioned which describe some aspect of the data generation: .. code-block:: python from hypothesis import given, strategies as st @given(st.integers().filter(lambda x: x % 2 == 0)) def test_even_integers(i): pass You would see something like: .. code-block:: none test_even_integers: - 100 passing examples, 0 failing examples, 36 invalid examples - Typical runtimes: 0-1 ms - Fraction of time spent in data generation: ~ 16% - Stopped because settings.max_examples=100 - Events: * 80.88%, Retried draw from integers().filter(lambda x: ) to satisfy filter * 26.47%, Aborted test because unable to satisfy integers().filter(lambda x: ) You can also mark custom events in a test using the ``event`` function: .. autofunction:: hypothesis.event .. code:: python from hypothesis import given, event, strategies as st @given(st.integers().filter(lambda x: x % 2 == 0)) def test_even_integers(i): event("i mod 3 = %d" % (i % 3,)) You will then see output like: .. 
code-block:: none test_even_integers: - 100 passing examples, 0 failing examples, 38 invalid examples - Typical runtimes: 0-1 ms - Fraction of time spent in data generation: ~ 16% - Stopped because settings.max_examples=100 - Events: * 80.43%, Retried draw from integers().filter(lambda x: ) to satisfy filter * 31.88%, i mod 3 = 0 * 27.54%, Aborted test because unable to satisfy integers().filter(lambda x: ) * 21.74%, i mod 3 = 1 * 18.84%, i mod 3 = 2 Arguments to ``event`` can be any hashable type, but two events will be considered the same if they are the same when converted to a string with :obj:`python:str`. ------------------ Making assumptions ------------------ Sometimes Hypothesis doesn't give you exactly the right sort of data you want - it's mostly of the right shape, but some examples won't work and you don't want to care about them. You *can* just ignore these by aborting the test early, but this runs the risk of accidentally testing a lot less than you think you are. Also it would be nice to spend less time on bad examples - if you're running 100 examples per test (the default) and it turns out 70 of those examples don't match your needs, that's a lot of wasted time. .. autofunction:: hypothesis.assume For example suppose you had the following test: .. code:: python @given(floats()) def test_negation_is_self_inverse(x): assert x == -(-x) Running this gives us: .. code:: Falsifying example: test_negation_is_self_inverse(x=float('nan')) AssertionError This is annoying. We know about NaN and don't really care about it, but as soon as Hypothesis finds a NaN example it will get distracted by that and tell us about it. Also the test will fail and we want it to pass. So lets block off this particular example: .. code:: python from math import isnan @given(floats()) def test_negation_is_self_inverse_for_non_nan(x): assume(not isnan(x)) assert x == -(-x) And this passes without a problem. 
In order to avoid the easy trap where you assume a lot more than you intended, Hypothesis will fail a test when it can't find enough examples passing the assumption. If we'd written: .. code:: python @given(floats()) def test_negation_is_self_inverse_for_non_nan(x): assume(False) assert x == -(-x) Then on running we'd have got the exception: .. code:: Unsatisfiable: Unable to satisfy assumptions of hypothesis test_negation_is_self_inverse_for_non_nan. Only 0 examples considered satisfied assumptions ~~~~~~~~~~~~~~~~~~~ How good is assume? ~~~~~~~~~~~~~~~~~~~ Hypothesis has an adaptive exploration strategy to try to avoid things which falsify assumptions, which should generally result in it still being able to find examples in hard to find situations. Suppose we had the following: .. code:: python @given(lists(integers())) def test_sum_is_positive(xs): assert sum(xs) > 0 Unsurprisingly this fails and gives the falsifying example ``[]``. Adding ``assume(xs)`` to this removes the trivial empty example and gives us ``[0]``. Adding ``assume(all(x > 0 for x in xs))`` and it passes: the sum of a list of positive integers is positive. The reason that this should be surprising is not that it doesn't find a counter-example, but that it finds enough examples at all. In order to make sure something interesting is happening, suppose we wanted to try this for long lists. e.g. suppose we added an ``assume(len(xs) > 10)`` to it. This should basically never find an example: a naive strategy would find fewer than one in a thousand examples, because if each element of the list is negative with probability one-half, you'd have to have ten of these go the right way by chance. In the default configuration Hypothesis gives up long before it's tried 1000 examples (by default it tries 200). Here's what happens if we try to run this: .. 
code:: python @given(lists(integers())) def test_sum_is_positive(xs): assume(len(xs) > 10) assume(all(x > 0 for x in xs)) print(xs) assert sum(xs) > 0 In: test_sum_is_positive() [17, 12, 7, 13, 11, 3, 6, 9, 8, 11, 47, 27, 1, 31, 1] [6, 2, 29, 30, 25, 34, 19, 15, 50, 16, 10, 3, 16] [25, 17, 9, 19, 15, 2, 2, 4, 22, 10, 10, 27, 3, 1, 14, 17, 13, 8, 16, 9, 2... [17, 65, 78, 1, 8, 29, 2, 79, 28, 18, 39] [13, 26, 8, 3, 4, 76, 6, 14, 20, 27, 21, 32, 14, 42, 9, 24, 33, 9, 5, 15, ... [2, 1, 2, 2, 3, 10, 12, 11, 21, 11, 1, 16] As you can see, Hypothesis doesn't find *many* examples here, but it finds some - enough to keep it happy. In general if you *can* shape your strategies better to your tests you should - for example :py:func:`integers(1, 1000) ` is a lot better than ``assume(1 <= x <= 1000)``, but ``assume`` will take you a long way if you can't. --------------------- Defining strategies --------------------- The type of object that is used to explore the examples given to your test function is called a :class:`~hypothesis.strategies.SearchStrategy`. These are created using the functions exposed in the :mod:`hypothesis.strategies` module. Many of these strategies expose a variety of arguments you can use to customize generation. For example for integers you can specify ``min`` and ``max`` values of integers you want. If you want to see exactly what a strategy produces you can ask for an example: .. code-block:: pycon >>> integers(min_value=0, max_value=10).example() 1 Many strategies are built out of other strategies. For example, if you want to define a tuple you need to say what goes in each element: .. code-block:: pycon >>> from hypothesis.strategies import tuples >>> tuples(integers(), integers()).example() (-24597, 12566) Further details are :doc:`available in a separate document `. ------------------------------------ The gory details of given parameters ------------------------------------ .. 
autofunction:: hypothesis.given The :func:`@given ` decorator may be used to specify which arguments of a function should be parametrized over. You can use either positional or keyword arguments, but not a mixture of both. For example all of the following are valid uses: .. code:: python @given(integers(), integers()) def a(x, y): pass @given(integers()) def b(x, y): pass @given(y=integers()) def c(x, y): pass @given(x=integers()) def d(x, y): pass @given(x=integers(), y=integers()) def e(x, **kwargs): pass @given(x=integers(), y=integers()) def f(x, *args, **kwargs): pass class SomeTest(TestCase): @given(integers()) def test_a_thing(self, x): pass The following are not: .. code:: python @given(integers(), integers(), integers()) def g(x, y): pass @given(integers()) def h(x, *args): pass @given(integers(), x=integers()) def i(x, y): pass @given() def j(x, y): pass The rules for determining what are valid uses of ``given`` are as follows: 1. You may pass any keyword argument to ``given``. 2. Positional arguments to ``given`` are equivalent to the rightmost named arguments for the test function. 3. Positional arguments may not be used if the underlying test function has varargs, arbitrary keywords, or keyword-only arguments. 4. Functions tested with ``given`` may not have any defaults. The reason for the "rightmost named arguments" behaviour is so that using :func:`@given ` with instance methods works: ``self`` will be passed to the function as normal and not be parametrized over. The function returned by given has all the same arguments as the original test, minus those that are filled in by :func:`@given `. Check :ref:`the notes on framework compatibility ` to see how this affects other testing libraries you may be using. .. _custom-function-execution: ------------------------- Custom function execution ------------------------- Hypothesis provides you with a hook that lets you control how it runs examples. 
This lets you do things like set up and tear down around each example, run examples in a subprocess, transform coroutine tests into normal tests, etc. For example, :class:`~hypothesis.extra.django.TransactionTestCase` in the Django extra runs each example in a separate database transaction. The way this works is by introducing the concept of an executor. An executor is essentially a function that takes a block of code and runs it. The default executor is: .. code:: python def default_executor(function): return function() You define executors by defining a method ``execute_example`` on a class. Any test methods on that class with :func:`@given ` used on them will use ``self.execute_example`` as an executor with which to run tests. For example, the following executor runs all its code twice: .. code:: python from unittest import TestCase class TestTryReallyHard(TestCase): @given(integers()) def test_something(self, i): perform_some_unreliable_operation(i) def execute_example(self, f): f() return f() Note: The functions you use in map, etc. will run *inside* the executor. i.e. they will not be called until you invoke the function passed to ``execute_example``. An executor must be able to handle being passed a function which returns None, otherwise it won't be able to run normal test cases. So for example the following executor is invalid: .. code:: python from unittest import TestCase class TestRunTwice(TestCase): def execute_example(self, f): return f()() and should be rewritten as: .. code:: python from unittest import TestCase class TestRunTwice(TestCase): def execute_example(self, f): result = f() if callable(result): result = result() return result An alternative hook is provided for use by test runner extensions such as :pypi:`pytest-trio`, which cannot use the ``execute_example`` method. 
This is **not** recommended for end-users - it is better to write a complete test function directly, perhaps by using a decorator to perform the same transformation before applying :func:`@given `. .. code:: python @given(x=integers()) @pytest.mark.trio async def test(x): ... # Illustrative code, inside the pytest-trio plugin test.hypothesis.inner_test = lambda x: trio.run(test, x) For authors of test runners however, assigning to the ``inner_test`` attribute of the ``hypothesis`` attribute of the test will replace the interior test. .. note:: The new ``inner_test`` must accept and pass through all the ``*args`` and ``**kwargs`` expected by the original test. If the end user has also specified a custom executor using the ``execute_example`` method, it - and all other execution-time logic - will be applied to the *new* inner test assigned by the test runner. -------------------------------- Making random code deterministic -------------------------------- While Hypothesis' example generation can be used for nondeterministic tests, debugging anything nondeterministic is usually a very frustrating exercise. To make things worse, our example *shrinking* relies on the same input causing the same failure each time - though we show the un-shrunk failure and a decent error message if it doesn't. By default, Hypothesis will handle the global ``random`` and ``numpy.random`` random number generators for you, and you can register others: .. autofunction:: hypothesis.register_random .. _type-inference: ------------------- Inferred Strategies ------------------- In some cases, Hypothesis can work out what to do when you omit arguments. This is based on introspection, *not* magic, and therefore has well-defined limits. :func:`~hypothesis.strategies.builds` will check the signature of the ``target`` (using :func:`~python:inspect.getfullargspec`). 
If there are required arguments with type annotations and no strategy was passed to :func:`~hypothesis.strategies.builds`, :func:`~hypothesis.strategies.from_type` is used to fill them in. You can also pass the special value :const:`hypothesis.infer` as a keyword argument, to force this inference for arguments with a default value. .. code-block:: pycon >>> def func(a: int, b: str): ... return [a, b] >>> builds(func).example() [-6993, ''] .. data:: hypothesis.infer :func:`@given ` does not perform any implicit inference for required arguments, as this would break compatibility with pytest fixtures. :const:`~hypothesis.infer` can be used as a keyword argument to explicitly fill in an argument from its type annotation. .. code:: python @given(a=infer) def test(a: int): pass # is equivalent to @given(a=integers()) def test(a): pass ~~~~~~~~~~~ Limitations ~~~~~~~~~~~ :pep:`3107` type annotations are not supported on Python 2, and Hypothesis does not inspect :pep:`484` type comments at runtime. While :func:`~hypothesis.strategies.from_type` will work as usual, inference in :func:`~hypothesis.strategies.builds` and :func:`@given ` will only work if you manually create the ``__annotations__`` attribute (e.g. by using ``@annotations(...)`` and ``@returns(...)`` decorators). The :mod:`python:typing` module is fully supported on Python 2 if you have the backport installed. The :mod:`python:typing` module is provisional and has a number of internal changes between Python 3.5.0 and 3.6.1, including at minor versions. These are all supported on a best-effort basis, but you may encounter problems with an old version of the module. Please report them to us, and consider updating to a newer version of Python as a workaround. .. 
_our-type-hints: ------------------------------ Type Annotations in Hypothesis ------------------------------ If you install Hypothesis and use :pypi:`mypy` 0.590+, or another :PEP:`561`-compatible tool, the type checker should automatically pick up our type hints. .. note:: Hypothesis' type hints may make breaking changes between minor releases. Upstream tools and conventions about type hints remain in flux - for example the :mod:`python:typing` module itself is provisional, and Mypy has not yet reached version 1.0 - and we plan to support the latest version of this ecosystem, as well as older versions where practical. We may also find more precise ways to describe the type of various interfaces, or change their type and runtime behaviour together in a way which is otherwise backwards-compatible. We often omit type hints for deprecated features or arguments, as an additional form of warning. There are known issues inferring the type of examples generated by :func:`~hypothesis.strategies.deferred`, :func:`~hypothesis.strategies.recursive`, :func:`~hypothesis.strategies.one_of`, :func:`~hypothesis.strategies.dictionaries`, and :func:`~hypothesis.strategies.fixed_dictionaries`. We will fix these, and require correspondingly newer versions of Mypy for type hinting, as the ecosystem improves. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Writing downstream type hints ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Projects that :doc:`provide Hypothesis strategies ` and use type hints may wish to annotate their strategies too. This *is* a supported use-case, again on a best-effort provisional basis. For example: .. code:: python def foo_strategy() -> SearchStrategy[Foo]: ... .. class:: hypothesis.strategies.SearchStrategy :class:`~hypothesis.strategies.SearchStrategy` is the type of all strategy objects. It is a generic type, and covariant in the type of the examples it creates. For example: - ``integers()`` is of type ``SearchStrategy[int]``. 
- ``lists(integers())`` is of type ``SearchStrategy[List[int]]``. - ``SearchStrategy[Dog]`` is a subtype of ``SearchStrategy[Animal]`` if ``Dog`` is a subtype of ``Animal`` (as seems likely). .. warning:: :class:`~hypothesis.strategies.SearchStrategy` **should only be used in type hints.** Please do not inherit from, compare to, or otherwise use it in any way outside of type hints. The only supported way to construct objects of this type is to use the functions provided by the :mod:`hypothesis.strategies` module! .. _pytest-plugin: ---------------------------- The Hypothesis pytest Plugin ---------------------------- Hypothesis includes a tiny plugin to improve integration with :pypi:`pytest`, which is activated by default (but does not affect other test runners). It aims to improve the integration between Hypothesis and Pytest by providing extra information and convenient access to config options. - ``pytest --hypothesis-show-statistics`` can be used to :ref:`display test and data generation statistics `. - ``pytest --hypothesis-profile=`` can be used to :ref:`load a settings profile `. ``pytest --hypothesis-verbosity=`` can be used to :ref:`override the current verbosity level `. - ``pytest --hypothesis-seed=`` can be used to :ref:`reproduce a failure with a particular seed `. Finally, all tests that are defined with Hypothesis automatically have ``@pytest.mark.hypothesis`` applied to them. See :ref:`here for information on working with markers `. .. note:: Pytest will load the plugin automatically if Hypothesis is installed. You don't need to do anything at all to use it. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/development.rst000066400000000000000000000041531354103617500272530ustar00rootroot00000000000000============================== Ongoing Hypothesis Development ============================== Hypothesis development is managed by me, `David R. MacIver `_. I am the primary author of Hypothesis. 
*However*, I no longer do unpaid feature development on Hypothesis. My roles as leader of the project are: 1. Helping other people do feature development on Hypothesis 2. Fixing bugs and other code health issues 3. Improving documentation 4. General release management work 5. Planning the general roadmap of the project 6. Doing sponsored development on tasks that are too large or in depth for other people to take on So all new features must either be sponsored or implemented by someone else. That being said, the maintenance team takes an active role in shepherding pull requests and helping people write a new feature (see :gh-file:`CONTRIBUTING.rst` for details and :pull:`154` for an example of how the process goes). This isn't "patches welcome", it's "we will help you write a patch". .. _release-policy: Release Policy ============== Hypothesis releases follow `semantic versioning `_. We maintain backwards-compatibility wherever possible, and use deprecation warnings to mark features that have been superseded by a newer alternative. If you want to detect this, you can :mod:`upgrade warnings to errors in the usual ways `. We use continuous deployment to ensure that you can always use our newest and shiniest features - every change to the source tree is automatically built and published on PyPI as soon as it's merged onto master, after code review and passing our extensive test suite. Project Roadmap =============== Hypothesis does not have a long-term release plan. However some visibility into our plans for future :doc:`compatibility ` may be useful: - We value compatibility, and maintain it as far as practical. This generally excludes things which are end-of-life upstream, or have an unstable API. - We would like to drop Python 2 support when it reaches end of life in 2020. Ongoing support is likely to depend on commercial funding. 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/django.rst000066400000000000000000000176731354103617500262060ustar00rootroot00000000000000.. _hypothesis-django: =========================== Hypothesis for Django users =========================== Hypothesis offers a number of features specific for Django testing, available in the ``hypothesis[django]`` :doc:`extra `. This is tested against each supported series with mainstream or extended support - if you're still getting security patches, you can test with Hypothesis. .. class:: hypothesis.extra.django.TestCase Using it is quite straightforward: All you need to do is subclass :class:`hypothesis.extra.django.TestCase` or :class:`hypothesis.extra.django.TransactionTestCase` and you can use :func:`@given ` as normal, and the transactions will be per example rather than per test function as they would be if you used :func:`@given ` with a normal django test suite (this is important because your test function will be called multiple times and you don't want them to interfere with each other). Test cases on these classes that do not use :func:`@given ` will be run as normal. .. class:: hypothesis.extra.django.TransactionTestCase We recommend avoiding :class:`~hypothesis.extra.django.TransactionTestCase` unless you really have to run each test case in a database transaction. Because Hypothesis runs this in a loop, the performance problems it normally has are significantly exacerbated and your tests will be really slow. If you are using :class:`~hypothesis.extra.django.TransactionTestCase`, you may need to use ``@settings(suppress_health_check=[HealthCheck.too_slow])`` to avoid :doc:`errors due to slow example generation `. Having set up a test class, you can now pass :func:`@given ` a strategy for Django models: .. autofunction:: hypothesis.extra.django.from_model For example, using `the trivial django project we have for testing `_: .. 
code-block:: python >>> from hypothesis.extra.django import from_model >>> from toystore.models import Customer >>> c = from_model(Customer).example() >>> c >>> c.email 'jaime.urbina@gmail.com' >>> c.name '\U00109d3d\U000e07be\U000165f8\U0003fabf\U000c12cd\U000f1910\U00059f12\U000519b0\U0003fabf\U000f1910\U000423fb\U000423fb\U00059f12\U000e07be\U000c12cd\U000e07be\U000519b0\U000165f8\U0003fabf\U0007bc31' >>> c.age -873375803 Hypothesis has just created this with whatever the relevant type of data is. Obviously the customer's age is implausible, which is only possible because we have not used (eg) :class:`~django:django.core.validators.MinValueValidator` to set the valid range for this field (or used a :class:`~django:django.db.models.PositiveSmallIntegerField`, which would only need a maximum value validator). If you *do* have validators attached, Hypothesis will only generate examples that pass validation. Sometimes that will mean that we fail a :class:`~hypothesis.HealthCheck` because of the filtering, so let's explicitly pass a strategy to skip validation at the strategy level: .. note:: Inference from validators will be much more powerful when :issue:`1116` is implemented, but there will always be some edge cases that require you to pass an explicit strategy. .. code-block:: python >>> from hypothesis.strategies import integers >>> c = from_model(Customer, age=integers(min_value=0, max_value=120)).example() >>> c >>> c.age 5 .. autofunction:: hypothesis.extra.django.from_form --------------- Tips and tricks --------------- Custom field types ================== If you have a custom Django field type you can register it with Hypothesis's model deriving functionality by registering a default strategy for it: .. 
code-block:: python >>> from toystore.models import CustomishField, Customish >>> from_model(Customish).example() hypothesis.errors.InvalidArgument: Missing arguments for mandatory field customish for model Customish >>> from hypothesis.extra.django import register_field_strategy >>> from hypothesis.strategies import just >>> register_field_strategy(CustomishField, just("hi")) >>> x = from_model(Customish).example() >>> x.customish 'hi' Note that this mapping is on exact type. Subtypes will not inherit it. .. autofunction:: hypothesis.extra.django.register_field_strategy Generating child models ======================= For the moment there's no explicit support in hypothesis-django for generating dependent models. i.e. a Company model will generate no Shops. However if you want to generate some dependent models as well, you can emulate this by using the *flatmap* function as follows: .. code:: python from hypothesis.strategies import lists, just def generate_with_shops(company): return lists(from_model(Shop, company=just(company))).map(lambda _: company) company_with_shops_strategy = from_model(Company).flatmap(generate_with_shops) Lets unpack what this is doing: The way flatmap works is that we draw a value from the original strategy, then apply a function to it which gives us a new strategy. We then draw a value from *that* strategy. So in this case we're first drawing a company, and then we're drawing a list of shops belonging to that company: The *just* strategy is a strategy such that drawing it always produces the individual value, so ``from_model(Shop, company=just(company))`` is a strategy that generates a Shop belonging to the original company. So the following code would give us a list of shops all belonging to the same company: .. code:: python from_model(Company).flatmap(lambda c: lists(from_model(Shop, company=just(c)))) The only difference from this and the above is that we want the company, not the shops. This is where the inner map comes in. 
We build the list of shops and then throw it away, instead returning the company we started with. This works because the models that Hypothesis generates are saved in the database, so we're essentially running the inner strategy purely for the side effect of creating those children in the database. .. _django-generating-primary-key: Generating primary key values ============================= If your model includes a custom primary key that you want to generate using a strategy (rather than a default auto-increment primary key) then Hypothesis has to deal with the possibility of a duplicate primary key. If a model strategy generates a value for the primary key field, Hypothesis will create the model instance with :meth:`~django:django.db.models.query.QuerySet.update_or_create`, overwriting any existing instance in the database for this test case with the same primary key. On the subject of ``MultiValueField`` ===================================== Django forms feature the :class:`~django:django.forms.MultiValueField` which allows for several fields to be combined under a single named field, the default example of this is the :class:`~django:django.forms.SplitDateTimeField`. .. code:: python class CustomerForm(forms.Form): name = forms.CharField() birth_date_time = forms.SplitDateTimeField() ``from_form`` supports ``MultiValueField`` subclasses directly, however if you want to define your own strategy be forewarned that Django binds data for a ``MultiValueField`` in a peculiar way. Specifically each sub-field is expected to have its own entry in ``data`` addressed by the field name (e.g. ``birth_date_time``) and the index of the sub-field within the ``MultiValueField``, so form ``data`` for the example above might look like this: .. 
code:: python { 'name': 'Samuel John', 'birth_date_time_0': '2018-05-19', # the date, as the first sub-field 'birth_date_time_1': '15:18:00' # the time, as the second sub-field } Thus, if you want to define your own strategies for such a field you must address your sub-fields appropriately: .. code:: python from_form(CustomerForm, birth_date_time_0=just('2018-05-19')) hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/endorsements.rst000066400000000000000000000250421354103617500274370ustar00rootroot00000000000000============ Testimonials ============ This is a page for listing people who are using Hypothesis and how excited they are about that. If that's you and your name is not on the list, `this file is in Git `_ and I'd love it if you sent me a pull request to fix that. --------------------------------------------------------------------------------------- `Stripe `_ --------------------------------------------------------------------------------------- At Stripe we use Hypothesis to test every piece of our machine learning model training pipeline (powered by scikit). Before we migrated, our tests were filled with hand-crafted pandas Dataframes that weren't representative at all of our actual very complex data. Because we needed to craft examples for each test, we took the easy way out and lived with extremely low test coverage. Hypothesis changed all that. Once we had our strategies for generating Dataframes of features it became trivial to slightly customize each strategy for new tests. Our coverage is now close to 90%. Full-stop, property-based testing is profoundly more powerful - and has caught or prevented far more bugs - than our old style of example-based testing. 
--------------------------------------------------------------------------------------- Kristian Glass - Director of Technology at `LaterPay GmbH `_ --------------------------------------------------------------------------------------- Hypothesis has been brilliant for expanding the coverage of our test cases, and also for making them much easier to read and understand, so we're sure we're testing the things we want in the way we want. ----------------------------------------------- `Seth Morton `_ ----------------------------------------------- When I first heard about Hypothesis, I knew I had to include it in my two open-source Python libraries, `natsort `_ and `fastnumbers `_ . Quite frankly, I was a little appalled at the number of bugs and "holes" I found in the code. I can now say with confidence that my libraries are more robust to "the wild." In addition, Hypothesis gave me the confidence to expand these libraries to fully support Unicode input, which I never would have had the stomach for without such thorough testing capabilities. Thanks! ------------------------------------------- `Sixty North `_ ------------------------------------------- At Sixty North we use Hypothesis for testing `Segpy `_ an open source Python library for shifting data between Python data structures and SEG Y files which contain geophysical data from the seismic reflection surveys used in oil and gas exploration. This is our first experience of property-based testing – as opposed to example-based testing. Not only are our tests more powerful, they are also much better explanations of what we expect of the production code. In fact, the tests are much closer to being specifications. Hypothesis has located real defects in our code which went undetected by traditional test cases, simply because Hypothesis is more relentlessly devious about test case generation than us mere humans! 
We found Hypothesis particularly beneficial for Segpy because SEG Y is an antiquated format that uses legacy text encodings (EBCDIC) and even a legacy floating point format we implemented from scratch in Python. Hypothesis is sure to find a place in most of our future Python codebases and many existing ones too. ------------------------------------------- `mulkieran `_ ------------------------------------------- Just found out about this excellent QuickCheck for Python implementation and ran up a few tests for my `bytesize `_ package last night. Refuted a few hypotheses in the process. Looking forward to using it with a bunch of other projects as well. ----------------------------------------------- `Adam Johnson `_ ----------------------------------------------- I have written a small library to serialize ``dict``\s to MariaDB's dynamic columns binary format, `mariadb-dyncol `_. When I first developed it, I thought I had tested it really well - there were hundreds of test cases, some of them even taken from MariaDB's test suite itself. I was ready to release. Lucky for me, I tried Hypothesis with David at the PyCon UK sprints. Wow! It found bug after bug after bug. Even after a first release, I thought of a way to make the tests do more validation, which revealed a further round of bugs! Most impressively, Hypothesis found a complicated off-by-one error in a condition with 4095 versus 4096 bytes of data - something that I would never have found. Long live Hypothesis! (Or at least, property-based testing). ------------------------------------------- `Josh Bronson `_ ------------------------------------------- Adopting Hypothesis improved `bidict `_'s test coverage and significantly increased our ability to make changes to the code with confidence that correct behavior would be preserved. Thank you, David, for the great testing tool. 
-------------------------------------------- `Cory Benfield `_ -------------------------------------------- Hypothesis is the single most powerful tool in my toolbox for working with algorithmic code, or any software that produces predictable output from a wide range of sources. When using it with `Priority `_, Hypothesis consistently found errors in my assumptions and extremely subtle bugs that would have taken months of real-world use to locate. In some cases, Hypothesis found subtle deviations from the correct output of the algorithm that may never have been noticed at all. When it comes to validating the correctness of your tools, nothing comes close to the thoroughness and power of Hypothesis. ------------------------------------------ `Jon Moore `_ ------------------------------------------ One extremely satisfied user here. Hypothesis is a really solid implementation of property-based testing, adapted well to Python, and with good features such as failure-case shrinkers. I first used it on a project where we needed to verify that a vendor's Python and non-Python implementations of an algorithm matched, and it found about a dozen cases that previous example-based testing and code inspections had not. Since then I've been evangelizing for it at our firm. -------------------------------------------- `Russel Winder `_ -------------------------------------------- I am using Hypothesis as an integral part of my Python workshops. Testing is an integral part of Python programming and whilst unittest and, better, pytest can handle example-based testing, property-based testing is increasingly far more important than example-based testing, and Hypothesis fits the bill. --------------------------------------------- `Wellfire Interactive `_ --------------------------------------------- We've been using Hypothesis in a variety of client projects, from testing Django-related functionality to domain-specific calculations. 
It both speeds up and simplifies the testing process since there's so much less tedious and error-prone work to do in identifying edge cases. Test coverage is nice but test depth is even nicer, and it's much easier to get meaningful test depth using Hypothesis. -------------------------------------------------- `Cody Kochmann `_ -------------------------------------------------- Hypothesis is being used as the engine for random object generation with my open source function fuzzer `battle_tested `_ which maps all behaviors of a function allowing you to minimize the chance of unexpected crashes when running code in production. With how efficient Hypothesis is at generating the edge cases that cause unexpected behavior to occur, `battle_tested `_ is able to map out the entire behavior of most functions in less than a few seconds. Hypothesis truly is a masterpiece. I can't thank you enough for building it. --------------------------------------------------- `Merchise Autrement `_ --------------------------------------------------- Just minutes after our first use of hypothesis `we uncovered a subtle bug`__ in one of our most used libraries. Since then, we have increasingly used hypothesis to improve the quality of our testing in libraries and applications as well. __ https://github.com/merchise/xoutil/commit/0a4a0f529812fed363efb653f3ade2d2bc203945 ---------------------------------------------- `Florian Kromer `_ ---------------------------------------------- At `Roboception GmbH `_ I use Hypothesis to implement fully automated stateless and stateful reliability tests for the `3D sensor rc_visard `_ and `robotic software components `_ . Thank you very much for creating the (probably) most powerful property-based testing framework. ------------------------------------------- `Reposit Power `_ ------------------------------------------- With a micro-service architecture, testing between services is made easy using Hypothesis in integration testing. 
Ensuring everything is running smoothly is vital to help maintain a secure network of Virtual Power Plants. It allows us to find potential bugs and edge cases with relative ease and minimal overhead. As our architecture relies on services communicating effectively, Hypothesis allows us to strictly test for the kind of data which moves around our services, particularly our backend Python applications. ------------------------------------------- `Your name goes here `_ ------------------------------------------- I know there are many more, because I keep finding out about new people I'd never even heard of using Hypothesis. If you're looking for a way to give back to a tool you love, adding your name here only takes a moment and would really help a lot. As per instructions at the top, just send me a pull request and I'll add you to the list. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/examples.rst000066400000000000000000000351411354103617500265500ustar00rootroot00000000000000================== Some more examples ================== This is a collection of examples of how to use Hypothesis in interesting ways. It's small for now but will grow over time. All of these examples are designed to be run under :pypi:`pytest`, and :pypi:`nose` should work too. ---------------------------------- How not to sort by a partial order ---------------------------------- The following is an example that's been extracted and simplified from a real bug that occurred in an earlier version of Hypothesis. The real bug was a lot harder to find. Suppose we've got the following type: .. 
code:: python class Node(object): def __init__(self, label, value): self.label = label self.value = tuple(value) def __repr__(self): return "Node(%r, %r)" % (self.label, self.value) def sorts_before(self, other): if len(self.value) >= len(other.value): return False return other.value[:len(self.value)] == self.value Each node is a label and a sequence of some data, and we have the relationship sorts_before meaning the data of the left is an initial segment of the right. So e.g. a node with value ``[1, 2]`` will sort before a node with value ``[1, 2, 3]``, but neither of ``[1, 2]`` nor ``[1, 3]`` will sort before the other. We have a list of nodes, and we want to topologically sort them with respect to this ordering. That is, we want to arrange the list so that if ``x.sorts_before(y)`` then x appears earlier in the list than y. We naively think that the easiest way to do this is to extend the partial order defined here to a total order by breaking ties arbitrarily and then using a normal sorting algorithm. So we define the following code: .. code:: python from functools import total_ordering @total_ordering class TopoKey(object): def __init__(self, node): self.value = node def __lt__(self, other): if self.value.sorts_before(other.value): return True if other.value.sorts_before(self.value): return False return self.value.label < other.value.label def sort_nodes(xs): xs.sort(key=TopoKey) This takes the order defined by ``sorts_before`` and extends it by breaking ties by comparing the node labels. But now we want to test that it works. First we write a function to verify that our desired outcome holds: .. code:: python def is_prefix_sorted(xs): for i in range(len(xs)): for j in range(i+1, len(xs)): if xs[j].sorts_before(xs[i]): return False return True This will return false if it ever finds a pair in the wrong order and return true otherwise. 
Given this function, what we want to do with Hypothesis is assert that for all sequences of nodes, the result of calling ``sort_nodes`` on it is sorted. First we need to define a strategy for Node: .. code:: python from hypothesis import settings, strategies import hypothesis.strategies as s NodeStrategy = s.builds( Node, s.integers(), s.lists(s.booleans(), max_size=10)) We want to generate *short* lists of values so that there's a decent chance of one being a prefix of the other (this is also why the choice of bool as the elements). We then define a strategy which builds a node out of an integer and one of those short lists of booleans. We can now write a test: .. code:: python from hypothesis import given @given(s.lists(NodeStrategy)) def test_sorting_nodes_is_prefix_sorted(xs): sort_nodes(xs) assert is_prefix_sorted(xs) this immediately fails with the following example: .. code:: python [Node(0, (False, True)), Node(0, (True,)), Node(0, (False,))] The reason for this is that because False is not a prefix of (True, True) nor vice versa, sorting things the first two nodes are equal because they have equal labels. This makes the whole order non-transitive and produces basically nonsense results. But this is pretty unsatisfying. It only works because they have the same label. Perhaps we actually wanted our labels to be unique. Lets change the test to do that. .. code:: python def deduplicate_nodes_by_label(nodes): table = {node.label: node for node in nodes} return list(table.values()) We define a function to deduplicate nodes by labels, and can now map that over a strategy for lists of nodes to give us a strategy for lists of nodes with unique labels: .. code:: python @given(s.lists(NodeStrategy).map(deduplicate_nodes_by_label)) def test_sorting_nodes_is_prefix_sorted(xs): sort_nodes(xs) assert is_prefix_sorted(xs) Hypothesis quickly gives us an example of this *still* being wrong: .. 
code:: python [Node(0, (False,)), Node(-1, (True,)), Node(-2, (False, False))]) Now this is a more interesting example. None of the nodes will sort equal. What is happening here is that the first node is strictly less than the last node because (False,) is a prefix of (False, False). This is in turn strictly less than the middle node because neither is a prefix of the other and -2 < -1. The middle node is then less than the first node because -1 < 0. So, convinced that our implementation is broken, we write a better one: .. code:: python def sort_nodes(xs): for i in range(1, len(xs)): j = i - 1 while j >= 0: if xs[j].sorts_before(xs[j+1]): break xs[j], xs[j+1] = xs[j+1], xs[j] j -= 1 This is just insertion sort slightly modified - we swap a node backwards until swapping it further would violate the order constraints. The reason this works is because our order is a partial order already (this wouldn't produce a valid result for a general topological sorting - you need the transitivity). We now run our test again and it passes, telling us that this time we've successfully managed to sort some nodes without getting it completely wrong. Go us. -------------------- Time zone arithmetic -------------------- This is an example of some tests for :pypi:`pytz` which check that various timezone conversions behave as you would expect them to. These tests should all pass, and are mostly a demonstration of some useful sorts of thing to test with Hypothesis, and how the :func:`~hypothesis.strategies.datetimes` strategy works. .. code-block:: pycon >>> from datetime import timedelta >>> from hypothesis.extra.pytz import timezones >>> from hypothesis.strategies import datetimes >>> # The datetimes strategy is naive by default, so tell it to use timezones >>> aware_datetimes = datetimes(timezones=timezones()) >>> @given(aware_datetimes, timezones(), timezones()) ... def test_convert_via_intermediary(dt, tz1, tz2): ... """Test that converting between timezones is not affected ... 
by a detour via another timezone. ... """ ... assert dt.astimezone(tz1).astimezone(tz2) == dt.astimezone(tz2) >>> @given(aware_datetimes, timezones()) ... def test_convert_to_and_fro(dt, tz2): ... """If we convert to a new timezone and back to the old one ... this should leave the result unchanged. ... """ ... tz1 = dt.tzinfo ... assert dt == dt.astimezone(tz2).astimezone(tz1) >>> @given(aware_datetimes, timezones()) ... def test_adding_an_hour_commutes(dt, tz): ... """When converting between timezones it shouldn't matter ... if we add an hour here or add an hour there. ... """ ... an_hour = timedelta(hours=1) ... assert (dt + an_hour).astimezone(tz) == dt.astimezone(tz) + an_hour >>> @given(aware_datetimes, timezones()) ... def test_adding_a_day_commutes(dt, tz): ... """When converting between timezones it shouldn't matter ... if we add a day here or add a day there. ... """ ... a_day = timedelta(days=1) ... assert (dt + a_day).astimezone(tz) == dt.astimezone(tz) + a_day >>> # And we can check that our tests pass >>> test_convert_via_intermediary() >>> test_convert_to_and_fro() >>> test_adding_an_hour_commutes() >>> test_adding_a_day_commutes() ------------------- Condorcet's Paradox ------------------- A classic paradox in voting theory, called Condorcet's paradox, is that majority preferences are not transitive. That is, there is a population and a set of three candidates A, B and C such that the majority of the population prefer A to B, B to C and C to A. Wouldn't it be neat if we could use Hypothesis to provide an example of this? Well as you can probably guess from the presence of this section, we can! The main trick is to decide how we want to represent the result of an election - for this example, we'll use a list of "votes", where each vote is a list of candidates in the voters preferred order. Without further ado, here is the code: .. 
code:: python from hypothesis import given, assume from hypothesis.strategies import lists, permutations from collections import Counter # We need at least three candidates and at least three voters to have a # paradox; anything less can only lead to victories or at worst ties. @given(lists(permutations(['A', 'B', 'C']), min_size=3)) def test_elections_are_transitive(election): all_candidates = {"A", "B", "C"} # First calculate the pairwise counts of how many prefer each candidate # to the other counts = Counter() for vote in election: for i in range(len(vote)): for j in range(i+1, len(vote)): counts[(vote[i], vote[j])] += 1 # Now look at which pairs of candidates one has a majority over the # other and store that. graph = {} for i in all_candidates: for j in all_candidates: if counts[(i, j)] > counts[(j, i)]: graph.setdefault(i, set()).add(j) # Now for each triple assert that it is transitive. for x in all_candidates: for y in graph.get(x, ()): for z in graph.get(y, ()): assert x not in graph.get(z, ()) The example Hypothesis gives me on my first run (your mileage may of course vary) is: .. code:: python [['A', 'B', 'C'], ['B', 'C', 'A'], ['C', 'A', 'B']] Which does indeed do the job: The majority (votes 0 and 1) prefer B to C, the majority (votes 0 and 2) prefer A to B and the majority (votes 1 and 2) prefer C to A. This is in fact basically the canonical example of the voting paradox. ------------------- Fuzzing an HTTP API ------------------- Hypothesis's support for testing HTTP services is somewhat nascent. There are plans for some fully featured things around this, but right now they're probably quite far down the line. But you can do a lot yourself without any explicit support! 
Here's a script I wrote to throw arbitrary data against the API for an entirely fictitious service called Waspfinder (this is only lightly obfuscated and you can easily figure out who I'm actually talking about, but I don't want you to run this code and hammer their API without their permission). All this does is use Hypothesis to generate arbitrary JSON data matching the format their API asks for and check for 500 errors. More advanced tests which then use the result and go on to do other things are definitely also possible. The :pypi:`swagger-conformance` package provides an excellent example of this! .. code:: python import unittest from hypothesis import given, assume, settings, strategies as st from collections import namedtuple import requests import os import random import time import math Goal = namedtuple("Goal", ("slug",)) # We just pass in our API credentials via environment variables. waspfinder_token = os.getenv('WASPFINDER_TOKEN') waspfinder_user = os.getenv('WASPFINDER_USER') assert waspfinder_token is not None assert waspfinder_user is not None GoalData = st.fixed_dictionaries({ 'title': st.text(), 'goal_type': st.sampled_from([ "hustler", "biker", "gainer", "fatloser", "inboxer", "drinker", "custom"]), 'goaldate': st.one_of(st.none(), st.floats()), 'goalval': st.one_of(st.none(), st.floats()), 'rate': st.one_of(st.none(), st.floats()), 'initval': st.floats(), 'panic': st.floats(), 'secret': st.booleans(), 'datapublic': st.booleans(), }) needs2 = ['goaldate', 'goalval', 'rate'] class WaspfinderTest(unittest.TestCase): @given(GoalData) def test_create_goal_dry_run(self, data): # We want slug to be unique for each run so that multiple test runs # don't interfere with each other. If for some reason some slugs trigger # an error and others don't we'll get a Flaky error, but that's OK. slug = hex(random.getrandbits(32))[2:] # Use assume to guide us through validation we know about, otherwise # we'll spend a lot of time generating boring examples. 
# Title must not be empty assume(data["title"]) # Exactly two of these values should be not None. The other will be # inferred by the API. assume(len([1 for k in needs2 if data[k] is not None]) == 2) for v in data.values(): if isinstance(v, float): assume(not math.isnan(v)) data["slug"] = slug # The API nicely supports a dry run option, which means we don't have # to worry about the user account being spammed with lots of fake goals # Otherwise we would have to make sure we cleaned up after ourselves # in this test. data["dryrun"] = True data["auth_token"] = waspfinder_token for d, v in data.items(): if v is None: data[d] = "null" else: data[d] = str(v) result = requests.post( "https://waspfinder.example.com/api/v1/users/" "%s/goals.json" % (waspfinder_user,), data=data) # Lets not hammer the API too badly. This will of course make the # tests even slower than they otherwise would have been, but that's # life. time.sleep(1.0) # For the moment all we're testing is that this doesn't generate an # internal error. If we didn't use the dry run option we could have # then tried doing more with the result, but this is a good start. self.assertNotEqual(result.status_code, 500) if __name__ == '__main__': unittest.main() hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/extras.rst000066400000000000000000000023511354103617500262350ustar00rootroot00000000000000====================== First-party extensions ====================== Hypothesis has minimal dependencies (just :pypi:`attrs`), to maximise compatibility and make installing Hypothesis as easy as possible. Our integrations with specific packages are therefore provided by ``extra`` modules that need their individual dependencies installed in order to work. You can install these dependencies using the setuptools extra feature as e.g. ``pip install hypothesis[django]``. This will check installation of compatible versions. 
You can also just install hypothesis into a project using them, ignore the version constraints, and hope for the best. In general "Which version is Hypothesis compatible with?" is a hard question to answer and even harder to regularly test. Hypothesis is always tested against the latest compatible version and each package will note the expected compatibility range. If you run into a bug with any of these please specify the dependency version. There are separate pages for :doc:`django` and :doc:`numpy`. .. automodule:: hypothesis.extra.dpcontracts :members: .. automodule:: hypothesis.extra.lark :members: .. automodule:: hypothesis.extra.pytz :members: .. automodule:: hypothesis.extra.dateutil :members: hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/healthchecks.rst000066400000000000000000000032611354103617500273560ustar00rootroot00000000000000============= Health checks ============= Hypothesis tries to detect common mistakes and things that will cause difficulty at run time in the form of a number of 'health checks'. These include detecting and warning about: * Strategies with very slow data generation * Strategies which filter out too much * Recursive strategies which branch too much * Tests that are unlikely to complete in a reasonable amount of time. If any of these scenarios are detected, Hypothesis will emit a warning about them. The general goal of these health checks is to warn you about things that you are doing that might appear to work but will either cause Hypothesis to not work correctly or to perform badly. To selectively disable health checks, use the :obj:`~hypothesis.settings.suppress_health_check` setting. The argument for this parameter is a list with elements drawn from any of the class-level attributes of the HealthCheck class. Using a value of ``HealthCheck.all()`` will disable all health checks. .. module:: hypothesis .. autoclass:: HealthCheck :undoc-members: :inherited-members: :exclude-members: all .. 
_deprecation-policy: ------------ Deprecations ------------ We also use a range of custom exception and warning types, so you can see exactly where an error came from - or turn only our warnings into errors. .. autoclass:: hypothesis.errors.HypothesisDeprecationWarning Deprecated features will continue to emit warnings for at least six months, and then be removed in the following major release. Note however that not all warnings are subject to this grace period; sometimes we strengthen validation by adding a warning and these may become errors immediately at a major release. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/index.rst000066400000000000000000000053511354103617500260410ustar00rootroot00000000000000====================== Welcome to Hypothesis! ====================== `Hypothesis `_ is a Python library for creating unit tests which are simpler to write and more powerful when run, finding edge cases in your code you wouldn't have thought to look for. It is stable, powerful and easy to add to any existing test suite. It works by letting you write tests that assert that something should be true for every case, not just the ones you happen to think of. Think of a normal unit test as being something like the following: 1. Set up some data. 2. Perform some operations on the data. 3. Assert something about the result. Hypothesis lets you write tests which instead look like this: 1. For all data matching some specification. 2. Perform some operations on the data. 3. Assert something about the result. This is often called property based testing, and was popularised by the Haskell library `Quickcheck `_. It works by generating arbitrary data matching your specification and checking that your guarantee still holds in that case. If it finds an example where it doesn't, it takes that example and cuts it down to size, simplifying it until it finds a much smaller example that still causes the problem. 
It then saves that example for later, so that once it has found a problem with your code it will not forget it in the future. Writing tests of this form usually consists of deciding on guarantees that your code should make - properties that should always hold true, regardless of what the world throws at you. Examples of such guarantees might be: * Your code shouldn't throw an exception, or should only throw a particular type of exception (this works particularly well if you have a lot of internal assertions). * If you delete an object, it is no longer visible. * If you serialize and then deserialize a value, then you get the same value back. Now you know the basics of what Hypothesis does, the rest of this documentation will take you through how and why. It's divided into a number of sections, which you can see in the sidebar (or the menu at the top if you're on mobile), but you probably want to begin with the :doc:`Quick start guide `, which will give you a worked example of how to use Hypothesis and a detailed outline of the things you need to know to begin testing your code with it, or check out some of the `introductory articles `_. .. toctree:: :maxdepth: 1 :hidden: quickstart details settings data extras django numpy healthchecks database stateful supported examples community manifesto endorsements usage strategies changes development support packaging reproducing hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/manifesto.rst000066400000000000000000000064351354103617500267230ustar00rootroot00000000000000========================= The Purpose of Hypothesis ========================= What is Hypothesis for? From the perspective of a user, the purpose of Hypothesis is to make it easier for you to write better tests. 
From my perspective as the author, that is of course also a purpose of Hypothesis, but (if you will permit me to indulge in a touch of megalomania for a moment), the larger purpose of Hypothesis is to drag the world kicking and screaming into a new and terrifying age of high quality software. Software is, as they say, eating the world. Software is also `terrible`_. It's buggy, insecure and generally poorly thought out. This combination is clearly a recipe for disaster. And the state of software testing is even worse. Although it's fairly uncontroversial at this point that you *should* be testing your code, can you really say with a straight face that most projects you've worked on are adequately tested? A lot of the problem here is that it's too hard to write good tests. Your tests encode exactly the same assumptions and fallacies that you had when you wrote the code, so they miss exactly the same bugs that you missed when you wrote the code. Meanwhile, there are all sorts of tools for making testing better that are basically unused. The original Quickcheck is from *1999* and the majority of developers have not even heard of it, let alone used it. There are a bunch of half-baked implementations for most languages, but very few of them are worth using. The goal of Hypothesis is to bring advanced testing techniques to the masses, and to provide an implementation that is so high quality that it is easier to use them than it is not to use them. Where I can, I will beg, borrow and steal every good idea I can find that someone has had to make software testing better. Where I can't, I will invent new ones. Quickcheck is the start, but I also plan to integrate ideas from fuzz testing (a planned future feature is to use coverage information to drive example selection, and the example saving database is already inspired by the workflows people use for fuzz testing), and am open to and actively seeking out other suggestions and ideas. 
The plan is to treat the social problem of people not using these ideas as a bug to which there is a technical solution: Does property-based testing not match your workflow? That's a bug, let's fix it by figuring out how to integrate Hypothesis into it. Too hard to generate custom data for your application? That's a bug. Let's fix it by figuring out how to make it easier, or how to take something you're already using to specify your data and derive a generator from that automatically. Find the explanations of these advanced ideas hopelessly obtuse and hard to follow? That's a bug. Let's provide you with an easy API that lets you test your code better without a PhD in software verification. Grand ambitions, I know, and I expect ultimately the reality will be somewhat less grand, but so far in about three months of development, Hypothesis has become the most solid implementation of Quickcheck ever seen in a mainstream language (as long as we don't count Scala as mainstream yet), and at the same time managed to significantly push forward the state of the art, so I think there's reason to be optimistic. .. _terrible: https://www.youtube.com/watch?v=csyL9EC0S0c hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/numpy.rst000066400000000000000000000035631354103617500261050ustar00rootroot00000000000000=================================== Hypothesis for the Scientific Stack =================================== .. _hypothesis-numpy: ----- numpy ----- Hypothesis offers a number of strategies for `NumPy `_ testing, available in the ``hypothesis[numpy]`` :doc:`extra `. It lives in the ``hypothesis.extra.numpy`` package. The centerpiece is the :func:`~hypothesis.extra.numpy.arrays` strategy, which generates arrays with any dtype, shape, and contents you can specify or give a strategy for. To make this as useful as possible, strategies are provided to generate array shapes and generate all kinds of fixed-size or compound dtypes. .. 
automodule:: hypothesis.extra.numpy :members: :exclude-members: ArrayStrategy, BroadcastShapeStrategy .. _hypothesis-pandas: ------ pandas ------ Hypothesis provides strategies for several of the core pandas data types: :class:`pandas.Index`, :class:`pandas.Series` and :class:`pandas.DataFrame`. The general approach taken by the pandas module is that there are multiple strategies for generating indexes, and all of the other strategies take the number of entries they contain from their index strategy (with sensible defaults). So e.g. a Series is specified by specifying its :class:`numpy.dtype` (and/or a strategy for generating elements for it). .. automodule:: hypothesis.extra.pandas :members: ~~~~~~~~~~~~~~~~~~ Supported Versions ~~~~~~~~~~~~~~~~~~ There is quite a lot of variation between pandas versions. We only commit to supporting the latest version of pandas, but older minor versions are supported on a "best effort" basis. Hypothesis is currently tested against and confirmed working with Pandas 0.19, 0.20, 0.21, 0.22, and 0.23. Releases that are not the latest patch release of their minor version are not tested or officially supported, but will probably also work unless you hit a pandas bug. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/packaging.rst000066400000000000000000000056351354103617500266630ustar00rootroot00000000000000==================== Packaging Guidelines ==================== Downstream packagers often want to package Hypothesis. Here are some guidelines. The primary guideline is this: If you are not prepared to keep up with the Hypothesis release schedule, don't. You will annoy me and are doing your users a disservice. Hypothesis has a very frequent release schedule. It's rare that it goes a week without a release, and there are often multiple releases in a given week. If you *are* prepared to keep up with this schedule, you might find the rest of this document useful. 
---------------- Release tarballs ---------------- These are available from :gh-link:`the GitHub releases page `. The tarballs on PyPI are intended for installation from a Python tool such as pip and should not be considered complete releases. Requests to include additional files in them will not be granted. Their absence is not a bug. ------------ Dependencies ------------ ~~~~~~~~~~~~~~~ Python versions ~~~~~~~~~~~~~~~ Hypothesis is designed to work with a range of Python versions. We always support `all versions of CPython with upstream support `_, and plan to drop Python 2 at EOL in 2020. We also support the latest versions of PyPy for Python 3, and for Python 2 until the CPython 2 EOL. If you feel the need to have separate Python 3 and Python 2 packages you can, but Hypothesis works unmodified on either. ~~~~~~~~~~~~~~~~~~~~~~ Other Python libraries ~~~~~~~~~~~~~~~~~~~~~~ Hypothesis has *mandatory* dependencies on the following libraries: * :pypi:`attrs` * :pypi:`enum34` is required on Python 2.7 Hypothesis has *optional* dependencies on the following libraries: * :pypi:`pytz` (almost any version should work) * `Django `_, all supported versions * :pypi:`numpy`, 1.10 or later (earlier versions will probably work fine) * :pypi:`pandas`, 0.19 or later * :pypi:`pytest` (3.0 or greater). This is a mandatory dependency for testing Hypothesis itself but optional for users. The way this works when installing Hypothesis normally is that these features become available if the relevant library is installed. ------------------ Testing Hypothesis ------------------ If you want to test Hypothesis as part of your packaging you will probably not want to use the mechanisms Hypothesis itself uses for running its tests, because it has a lot of logic for installing and testing against different versions of Python. The tests must be run with pytest >= 3.0; check the :gh-file:`requirements/` directory for details. 
The organisation of the tests is described in the :gh-file:`hypothesis-python/tests/README.rst`. -------- Examples -------- * `arch linux `_ * `fedora `_ * `gentoo `_ hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/quickstart.rst000066400000000000000000000214741354103617500271300ustar00rootroot00000000000000================= Quick start guide ================= This document should talk you through everything you need to get started with Hypothesis. ---------- An example ---------- Suppose we've written a `run length encoding `_ system and we want to test it out. We have the following code which I took straight from the `Rosetta Code `_ wiki (OK, I removed some commented out code and fixed the formatting, but there are no functional modifications): .. code:: python def encode(input_string): count = 1 prev = '' lst = [] for character in input_string: if character != prev: if prev: entry = (prev, count) lst.append(entry) count = 1 prev = character else: count += 1 entry = (character, count) lst.append(entry) return lst def decode(lst): q = '' for character, count in lst: q += character * count return q We want to write a test for this that will check some invariant of these functions. The invariant one tends to try when you've got this sort of encoding / decoding is that if you encode something and then decode it then you get the same value back. Lets see how you'd do that with Hypothesis: .. code:: python from hypothesis import given from hypothesis.strategies import text @given(text()) def test_decode_inverts_encode(s): assert decode(encode(s)) == s (For this example we'll just let pytest discover and run the test. We'll cover other ways you could have run it later). The text function returns what Hypothesis calls a search strategy. An object with methods that describe how to generate and simplify certain kinds of values. 
The :func:`@given ` decorator then takes our test function and turns it into a parametrized one which, when called, will run the test function over a wide range of matching data from that strategy. Anyway, this test immediately finds a bug in the code: .. code:: Falsifying example: test_decode_inverts_encode(s='') UnboundLocalError: local variable 'character' referenced before assignment Hypothesis correctly points out that this code is simply wrong if called on an empty string. If we fix that by just adding the following code to the beginning of the function then Hypothesis tells us the code is correct (by doing nothing as you'd expect a passing test to). .. code:: python if not input_string: return [] If we wanted to make sure this example was always checked we could add it in explicitly by using the :func:`@example ` decorator: .. code:: python from hypothesis import given, example from hypothesis.strategies import text @given(text()) @example('') def test_decode_inverts_encode(s): assert decode(encode(s)) == s This can be useful to show other developers (or your future self) what kinds of data are valid inputs, or to ensure that particular edge cases such as ``""`` are tested every time. It's also great for regression tests because although Hypothesis will :doc:`remember failing examples `, we don't recommend distributing that database. It's also worth noting that both example and given support keyword arguments as well as positional. The following would have worked just as well: .. code:: python @given(s=text()) @example(s='') def test_decode_inverts_encode(s): assert decode(encode(s)) == s Suppose we had a more interesting bug and forgot to reset the count each time. Say we missed a line in our ``encode`` method: .. 
code:: python def encode(input_string): count = 1 prev = '' lst = [] for character in input_string: if character != prev: if prev: entry = (prev, count) lst.append(entry) # count = 1 # Missing reset operation prev = character else: count += 1 entry = (character, count) lst.append(entry) return lst Hypothesis quickly informs us of the following example: .. code:: Falsifying example: test_decode_inverts_encode(s='001') Note that the example provided is really quite simple. Hypothesis doesn't just find *any* counter-example to your tests, it knows how to simplify the examples it finds to produce small easy to understand ones. In this case, two identical values are enough to set the count to a number different from one, followed by another distinct value which should have reset the count but in this case didn't. The examples Hypothesis provides are valid Python code you can run. Any arguments that you explicitly provide when calling the function are not generated by Hypothesis, and if you explicitly provide *all* the arguments Hypothesis will just call the underlying function once rather than running it multiple times. ---------- Installing ---------- Hypothesis is :pypi:`available on PyPI as "hypothesis" `. You can install it with: .. code:: bash pip install hypothesis You can install the dependencies for :doc:`optional extensions ` with e.g. ``pip install hypothesis[pandas,django]``. If you want to install directly from the source code (e.g. because you want to make changes and install the changed version), check out the instructions in :gh-file:`CONTRIBUTING.rst`. ------------- Running tests ------------- In our example above we just let pytest discover and run our tests, but we could also have run it explicitly ourselves: .. code:: python if __name__ == '__main__': test_decode_inverts_encode() We could also have done this as a :class:`python:unittest.TestCase`: .. 
code:: python import unittest class TestEncoding(unittest.TestCase): @given(text()) def test_decode_inverts_encode(self, s): self.assertEqual(decode(encode(s)), s) if __name__ == '__main__': unittest.main() A detail: This works because Hypothesis ignores any arguments it hasn't been told to provide (positional arguments start from the right), so the self argument to the test is simply ignored and works as normal. This also means that Hypothesis will play nicely with other ways of parameterizing tests. e.g it works fine if you use pytest fixtures for some arguments and Hypothesis for others. ------------- Writing tests ------------- A test in Hypothesis consists of two parts: A function that looks like a normal test in your test framework of choice but with some additional arguments, and a :func:`@given ` decorator that specifies how to provide those arguments. Here are some other examples of how you could use that: .. code:: python from hypothesis import given import hypothesis.strategies as st @given(st.integers(), st.integers()) def test_ints_are_commutative(x, y): assert x + y == y + x @given(x=st.integers(), y=st.integers()) def test_ints_cancel(x, y): assert (x + y) - y == x @given(st.lists(st.integers())) def test_reversing_twice_gives_same_list(xs): # This will generate lists of arbitrary length (usually between 0 and # 100 elements) whose elements are integers. ys = list(xs) ys.reverse() ys.reverse() assert xs == ys @given(st.tuples(st.booleans(), st.text())) def test_look_tuples_work_too(t): # A tuple is generated as the one you provided, with the corresponding # types in those positions. assert len(t) == 2 assert isinstance(t[0], bool) assert isinstance(t[1], str) Note that as we saw in the above example you can pass arguments to :func:`@given ` either as positional or as keywords. -------------- Where to start -------------- You should now know enough of the basics to write some tests for your code using Hypothesis. 
The best way to learn is by doing, so go have a try. If you're stuck for ideas for how to use this sort of test for your code, here are some good starting points: 1. Try just calling functions with appropriate arbitrary data and see if they crash. You may be surprised how often this works. e.g. note that the first bug we found in the encoding example didn't even get as far as our assertion: It crashed because it couldn't handle the data we gave it, not because it did the wrong thing. 2. Look for duplication in your tests. Are there any cases where you're testing the same thing with multiple different examples? Can you generalise that to a single test using Hypothesis? 3. `This piece is designed for an F# implementation `_, but is still very good advice which you may find helps give you good ideas for using Hypothesis. If you have any trouble getting started, don't feel shy about :doc:`asking for help `. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/reproducing.rst000066400000000000000000000125461354103617500272570ustar00rootroot00000000000000==================== Reproducing Failures ==================== One of the things that is often concerning for people using randomized testing is the question of how to reproduce failing test cases. .. note:: It is better to think about the data Hypothesis generates as being *arbitrary*, rather than *random*. We deliberately generate any valid data that seems likely to cause errors, so you shouldn't rely on any expected distribution of or relationships between generated data. You can read about "swarm testing" and "coverage guided fuzzing" if you're interested, because you don't need to know for Hypothesis! Fortunately Hypothesis has a number of features to support reproducing test failures. 
The one you will use most commonly when developing locally is :doc:`the example database `, which means that you shouldn't have to think about the problem at all for local use - test failures will just automatically reproduce without you having to do anything. The example database is perfectly suitable for sharing between machines, but there currently aren't very good work flows for that, so Hypothesis provides a number of ways to make examples reproducible by adding them to the source code of your tests. This is particularly useful when e.g. you are trying to run an example that has failed on your CI, or otherwise share them between machines. .. _providing-explicit-examples: --------------------------- Providing explicit examples --------------------------- The simplest way to reproduce a failed test is to ask Hypothesis to run the failing example it printed. For example, if ``Falsifying example: test(n=1)`` was printed you can decorate ``test`` with ``@example(n=1)``. ``@example`` can also be used to ensure a specific example is *always* executed as a regression test or to cover some edge case - basically combining a Hypothesis test and a traditional parametrized test. .. autofunction:: hypothesis.example Hypothesis will run all examples you've asked for first. If any of them fail it will not go on to look for more examples. It doesn't matter whether you put the example decorator before or after given. Any permutation of the decorators in the above will do the same thing. Note that examples can be positional or keyword based. If they're positional then they will be filled in from the right when calling, so either of the following styles will work as expected: .. 
code:: python @given(text()) @example("Hello world") @example(x="Some very long string") def test_some_code(x): assert True from unittest import TestCase class TestThings(TestCase): @given(text()) @example("Hello world") @example(x="Some very long string") def test_some_code(self, x): assert True As with ``@given``, it is not permitted for a single example to be a mix of positional and keyword arguments. Either are fine, and you can use one in one example and the other in another example if for some reason you really want to, but a single example must be consistent. .. _reproducing-with-seed: ------------------------------------- Reproducing a test run with ``@seed`` ------------------------------------- .. autofunction:: hypothesis.seed When a test fails unexpectedly, usually due to a health check failure, Hypothesis will print out a seed that led to that failure, if the test is not already running with a fixed seed. You can then recreate that failure using either the ``@seed`` decorator or (if you are running :pypi:`pytest`) with ``--hypothesis-seed``. The seed will not be printed if you could simply use ``@example`` instead. .. _reproduce_failure: ------------------------------------------------------- Reproducing an example with ``@reproduce_failure`` ------------------------------------------------------- Hypothesis has an opaque binary representation that it uses for all examples it generates. This representation is not intended to be stable across versions or with respect to changes in the test, but can be used to reproduce failures with the ``@reproduce_failure`` decorator. .. autofunction:: hypothesis.reproduce_failure The intent is that you should never write this decorator by hand, but it is instead provided by Hypothesis. When a test fails with a falsifying example, Hypothesis may print out a suggestion to use ``@reproduce_failure`` on the test to recreate the problem as follows: .. 
code-block:: pycon >>> from hypothesis import settings, given, PrintSettings >>> import hypothesis.strategies as st >>> @given(st.floats()) ... @settings(print_blob=True) ... def test(f): ... assert f == f ... >>> try: ... test() ... except AssertionError: ... pass Falsifying example: test(f=nan) You can reproduce this example by temporarily adding @reproduce_failure(..., b'AAAA//AAAAAAAAEA') as a decorator on your test case Adding the suggested decorator to the test should reproduce the failure (as long as everything else is the same - changing the versions of Python or anything else involved, might of course affect the behaviour of the test! Note that changing the version of Hypothesis will result in a different error - each ``@reproduce_failure`` invocation is specific to a Hypothesis version). By default these messages are not printed. If you want to see these you must set the :attr:`~hypothesis.settings.print_blob` setting to ``True``. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/settings.rst000066400000000000000000000155711354103617500265770ustar00rootroot00000000000000======== Settings ======== Hypothesis tries to have good defaults for its behaviour, but sometimes that's not enough and you need to tweak it. The mechanism for doing this is the :class:`~hypothesis.settings` object. You can set up a :func:`@given ` based test to use this using a settings decorator: :func:`@given ` invocation is as follows: .. code:: python from hypothesis import given, settings @given(integers()) @settings(max_examples=500) def test_this_thoroughly(x): pass This uses a :class:`~hypothesis.settings` object which causes the test to receive a much larger set of examples than normal. This may be applied either before or after the given and the results are the same. The following is exactly equivalent: .. 
code:: python from hypothesis import given, settings @settings(max_examples=500) @given(integers()) def test_this_thoroughly(x): pass ------------------ Available settings ------------------ .. autoclass:: hypothesis.settings :members: :exclude-members: register_profile, get_profile, load_profile, buffer_size .. _phases: ~~~~~~~~~~~~~~~~~~~~~ Controlling What Runs ~~~~~~~~~~~~~~~~~~~~~ Hypothesis divides tests into four logically distinct phases: 1. Running explicit examples :ref:`provided with the @example decorator `. 2. Rerunning a selection of previously failing examples to reproduce a previously seen error 3. Generating new examples. 4. Attempting to shrink an example found in phases 2 or 3 to a more manageable one (explicit examples cannot be shrunk). The phases setting provides you with fine grained control over which of these run, with each phase corresponding to a value on the :class:`~hypothesis.Phase` enum: .. class:: hypothesis.Phase 1. ``Phase.explicit`` controls whether explicit examples are run. 2. ``Phase.reuse`` controls whether previous examples will be reused. 3. ``Phase.generate`` controls whether new examples will be generated. 4. ``Phase.shrink`` controls whether examples will be shrunk. The phases argument accepts a collection with any subset of these. e.g. ``settings(phases=[Phase.generate, Phase.shrink])`` will generate new examples and shrink them, but will not run explicit examples or reuse previous failures, while ``settings(phases=[Phase.explicit])`` will only run the explicit examples. .. _verbose-output: ~~~~~~~~~~~~~~~~~~~~~~~~~~ Seeing intermediate result ~~~~~~~~~~~~~~~~~~~~~~~~~~ To see what's going on while Hypothesis runs your tests, you can turn up the verbosity setting. .. code-block:: pycon >>> from hypothesis import find, settings, Verbosity >>> from hypothesis.strategies import lists, integers >>> @given(lists(integers())) ... @settings(verbosity=Verbosity.verbose) ... def f(x): assert not any(x) ... 
f() Trying example: [] Falsifying example: [-1198601713, -67, 116, -29578] Shrunk example to [-1198601713] Shrunk example to [-1198601600] Shrunk example to [-1191228800] Shrunk example to [-8421504] Shrunk example to [-32896] Shrunk example to [-128] Shrunk example to [64] Shrunk example to [32] Shrunk example to [16] Shrunk example to [8] Shrunk example to [4] Shrunk example to [3] Shrunk example to [2] Shrunk example to [1] [1] The four levels are quiet, normal, verbose and debug. normal is the default, while in quiet mode Hypothesis will not print anything out, not even the final falsifying example. debug is basically verbose but a bit more so. You probably don't want it. If you are using :pypi:`pytest`, you may also need to :doc:`disable output capturing for passing tests `. ------------------------- Building settings objects ------------------------- Settings can be created by calling :class:`~hypothesis.settings` with any of the available settings values. Any absent ones will be set to defaults: .. code-block:: pycon >>> from hypothesis import settings >>> settings().max_examples 100 >>> settings(max_examples=10).max_examples 10 You can also pass a 'parent' settings object as the first argument, and any settings you do not specify as keyword arguments will be copied from the parent settings: .. code-block:: pycon >>> parent = settings(max_examples=10) >>> child = settings(parent, deadline=None) >>> parent.max_examples == child.max_examples == 10 True >>> parent.deadline 200 >>> child.deadline is None True ---------------- Default settings ---------------- At any given point in your program there is a current default settings, available as ``settings.default``. As well as being a settings object in its own right, all newly created settings objects which are not explicitly based off another settings are based off the default, so will inherit any values that are not explicitly set from it. You can change the defaults by using profiles. .. 
_settings_profiles: ~~~~~~~~~~~~~~~~~ settings Profiles ~~~~~~~~~~~~~~~~~ Depending on your environment you may want different default settings. For example: during development you may want to lower the number of examples to speed up the tests. However, in a CI environment you may want more examples so you are more likely to find bugs. Hypothesis allows you to define different settings profiles. These profiles can be loaded at any time. .. autoclass:: hypothesis.settings :members: register_profile, get_profile, load_profile Loading a profile changes the default settings but will not change the behavior of tests that explicitly change the settings. .. code-block:: pycon >>> from hypothesis import settings >>> settings.register_profile("ci", max_examples=1000) >>> settings().max_examples 100 >>> settings.load_profile("ci") >>> settings().max_examples 1000 Instead of loading the profile and overriding the defaults you can retrieve profiles for specific tests. .. code-block:: pycon >>> settings.get_profile("ci").max_examples 1000 Optionally, you may define the environment variable to load a profile for you. This is the suggested pattern for running your tests on CI. The code below should run in a `conftest.py` or any setup/initialization section of your test suite. If this variable is not defined the Hypothesis defined defaults will be loaded. .. code-block:: pycon >>> import os >>> from hypothesis import settings, Verbosity >>> settings.register_profile("ci", max_examples=1000) >>> settings.register_profile("dev", max_examples=10) >>> settings.register_profile("debug", max_examples=10, verbosity=Verbosity.verbose) >>> settings.load_profile(os.getenv(u'HYPOTHESIS_PROFILE', 'default')) If you are using the hypothesis pytest plugin and your profiles are registered by your conftest you can load one with the command line option ``--hypothesis-profile``. .. 
code:: bash $ pytest tests --hypothesis-profile hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/stateful.rst000066400000000000000000000315051354103617500265610ustar00rootroot00000000000000================ Stateful testing ================ With :func:`@given `, your tests are still something that you mostly write yourself, with Hypothesis providing some data. With Hypothesis's *stateful testing*, Hypothesis instead tries to generate not just data but entire tests. You specify a number of primitive actions that can be combined together, and then Hypothesis will try to find sequences of those actions that result in a failure. .. note:: This style of testing is often called *model-based testing*, but in Hypothesis is called *stateful testing* (mostly for historical reasons - the original implementation of this idea in Hypothesis was more closely based on `ScalaCheck's stateful testing `_ where the name is more apt). Both of these names are somewhat misleading: You don't really need any sort of formal model of your code to use this, and it can be just as useful for pure APIs that don't involve any state as it is for stateful ones. It's perhaps best to not take the name of this sort of testing too seriously. Regardless of what you call it, it is a powerful form of testing which is useful for most non-trivial APIs. Hypothesis has two stateful testing APIs: A high level one, providing what we call *rule based state machines*, and a low level one, providing what we call *generic state machines*. You probably want to use the rule based state machines - they provide a high level API for describing the sort of actions you want to perform, based on a structured representation of actions. However the generic state machines are more flexible, and are particularly useful if you want the set of currently possible actions to depend primarily on external state. .. 
_data-as-state-machine: ------------------------------- You may not need state machines ------------------------------- The basic idea of stateful testing is to make Hypothesis choose actions as well as values for your test, and state machines are a great declarative way to do just that. For simpler cases though, you might not need them at all - a standard test with :func:`@given ` might be enough, since you can use :func:`~hypothesis.strategies.data` in branches or loops. In fact, that's how the state machine explorer works internally. For more complex workloads though, where a higher level API comes into its own, keep reading! .. _rulebasedstateful: ------------------------- Rule based state machines ------------------------- Rule based state machines are the ones you're most likely to want to use. They're significantly more user friendly and should be good enough for most things you'd want to do. .. autoclass:: hypothesis.stateful.RuleBasedStateMachine A rule is very similar to a normal ``@given`` based test in that it takes values drawn from strategies and passes them to a user defined test function. The key difference is that where ``@given`` based tests must be independent, rules can be chained together - a single test run may involve multiple rule invocations, which may interact in various ways. Rules can take normal strategies as arguments, or a specific kind of strategy called a Bundle. A Bundle is a named collection of generated values that can be reused by other operations in the test. They are populated with the results of rules, and may be used as arguments to rules, allowing data to flow from one rule to another, and rules to work on the results of previous computations or actions. You can think of each value that gets added to any Bundle as being assigned to a new variable. 
Drawing a value from the bundle strategy means choosing one of the corresponding variables and using that value, and :func:`~hypothesis.stateful.consumes` as a ``del`` statement for that variable. If you can replace use of Bundles with instance attributes of the class that is often simpler, but often Bundles are strictly more powerful. The following rule based state machine example is a simplified version of a test for Hypothesis's example database implementation. An example database maps keys to sets of values, and in this test we compare one implementation of it to a simplified in memory model of its behaviour, which just stores the same values in a Python ``dict``. The test then runs operations against both the real database and the in-memory representation of it and looks for discrepancies in their behaviour. .. code:: python import shutil import tempfile from collections import defaultdict import hypothesis.strategies as st from hypothesis.database import DirectoryBasedExampleDatabase from hypothesis.stateful import Bundle, RuleBasedStateMachine, rule class DatabaseComparison(RuleBasedStateMachine): def __init__(self): super(DatabaseComparison, self).__init__() self.tempd = tempfile.mkdtemp() self.database = DirectoryBasedExampleDatabase(self.tempd) self.model = defaultdict(set) keys = Bundle('keys') values = Bundle('values') @rule(target=keys, k=st.binary()) def add_key(self, k): return k @rule(target=values, v=st.binary()) def add_value(self, v): return v @rule(k=keys, v=values) def save(self, k, v): self.model[k].add(v) self.database.save(k, v) @rule(k=keys, v=values) def delete(self, k, v): self.model[k].discard(v) self.database.delete(k, v) @rule(k=keys) def values_agree(self, k): assert set(self.database.fetch(k)) == self.model[k] def teardown(self): shutil.rmtree(self.tempd) TestDBComparison = DatabaseComparison.TestCase In this we declare two bundles - one for keys, and one for values. 
We have two trivial rules which just populate them with data (``k`` and ``v``), and three non-trivial rules: ``save`` saves a value under a key and ``delete`` removes a value from a key, in both cases also updating the model of what *should* be in the database. ``values_agree`` then checks that the contents of the database agrees with the model for a particular key. We can then integrate this into our test suite by getting a unittest TestCase from it: .. code:: python TestTrees = DatabaseComparison.TestCase # Or just run with pytest's unittest support if __name__ == '__main__': unittest.main() This test currently passes, but if we comment out the line where we call ``self.model[k].discard(v)``, we would see the following output when run under pytest: :: AssertionError: assert set() == {b''} ------------ Hypothesis ------------ state = DatabaseComparison() var1 = state.add_key(k=b'') var2 = state.add_value(v=var1) state.save(k=var1, v=var2) state.delete(k=var1, v=var2) state.values_agree(k=var1) state.teardown() Note how it's printed out a very short program that will demonstrate the problem. The output from a rule based state machine should generally be pretty close to Python code - if you have custom ``repr`` implementations that don't return valid Python then it might not be, but most of the time you should just be able to copy and paste the code into a test to reproduce it. You can control the detailed behaviour with a settings object on the TestCase (this is a normal hypothesis settings object using the defaults at the time the TestCase class was first referenced). For example if you wanted to run fewer examples with larger programs you could change the settings to: .. code:: python DatabaseComparison.TestCase.settings = settings(max_examples=50, stateful_step_count=100) Which doubles the number of steps each program runs and halves the number of test cases that will be run. 
----- Rules ----- As said earlier, rules are the most common feature used in RuleBasedStateMachine. They are defined by applying the :func:`~hypothesis.stateful.rule` decorator on a function. Note that RuleBasedStateMachine must have at least one rule defined and that a single function cannot be used to define multiple rules (this to avoid having multiple rules doing the same things). Due to the stateful execution method, rules generally cannot take arguments from other sources such as fixtures or ``pytest.mark.parametrize`` - consider providing them via a strategy such as :func:`~hypothesis.strategies.sampled_from` instead. .. autofunction:: hypothesis.stateful.rule .. autofunction:: hypothesis.stateful.consumes .. autofunction:: hypothesis.stateful.multiple ----------- Initializes ----------- Initializes are a special case of rules that are guaranteed to be run at most once at the beginning of a run (i.e. before any normal rule is called). Note if multiple initialize rules are defined, they may be called in any order, and that order will vary from run to run. Initializes are typically useful to populate bundles: .. autofunction:: hypothesis.stateful.initialize .. 
code:: python import hypothesis.strategies as st from hypothesis.stateful import RuleBasedStateMachine, Bundle, rule, initialize name_strategy = st.text(min_size=1).filter(lambda x: "/" not in x) class NumberModifier(RuleBasedStateMachine): folders = Bundle('folders') files = Bundle('files') @initialize(target=folders) def init_folders(self): return '/' @rule(target=folders, name=name_strategy) def create_folder(self, parent, name): return '%s/%s' % (parent, name) @rule(target=files, name=name_strategy) def create_file(self, parent, name): return '%s/%s' % (parent, name) ------------- Preconditions ------------- While it's possible to use :func:`~hypothesis.assume` in RuleBasedStateMachine rules, if you use it in only a few rules you can quickly run into a situation where few or none of your rules pass their assumptions. Thus, Hypothesis provides a :func:`~hypothesis.stateful.precondition` decorator to avoid this problem. The :func:`~hypothesis.stateful.precondition` decorator is used on ``rule``-decorated functions, and must be given a function that returns True or False based on the RuleBasedStateMachine instance. .. autofunction:: hypothesis.stateful.precondition .. code:: python from hypothesis.stateful import RuleBasedStateMachine, rule, precondition class NumberModifier(RuleBasedStateMachine): num = 0 @rule() def add_one(self): self.num += 1 @precondition(lambda self: self.num != 0) @rule() def divide_with_one(self): self.num = 1 / self.num By using :func:`~hypothesis.stateful.precondition` here instead of :func:`~hypothesis.assume`, Hypothesis can filter the inapplicable rules before running them. This makes it much more likely that a useful sequence of steps will be generated. Note that currently preconditions can't access bundles; if you need to use preconditions, you should store relevant data on the instance instead. ---------- Invariants ---------- Often there are invariants that you want to ensure are met after every step in a process. 
It would be possible to add these as rules that are run, but they would be run zero or multiple times between other rules. Hypothesis provides a decorator that marks a function to be run after every step. .. autofunction:: hypothesis.stateful.invariant .. code:: python from hypothesis.stateful import RuleBasedStateMachine, rule, invariant class NumberModifier(RuleBasedStateMachine): num = 0 @rule() def add_two(self): self.num += 2 if self.num > 50: self.num += 1 @invariant() def divide_with_one(self): assert self.num % 2 == 0 NumberTest = NumberModifier.TestCase Invariants can also have :func:`~hypothesis.stateful.precondition`\ s applied to them, in which case they will only be run if the precondition function returns true. Note that currently invariants can't access bundles; if you need to use invariants, you should store relevant data on the instance instead. ---------------------- Generic state machines ---------------------- .. warning:: ``GenericStateMachine`` is deprecated and will be removed in a future version. .. autoclass:: hypothesis.stateful.GenericStateMachine ------------------------- More fine grained control ------------------------- If you want to bypass the TestCase infrastructure you can invoke these manually. The stateful module exposes the function ``run_state_machine_as_test``, which takes an arbitrary function returning a RuleBasedStateMachine and an optional settings parameter and does the same as the class based runTest provided. This is not recommended as it bypasses some important internal functions, including reporting of statistics such as runtimes and :func:`~hypothesis.event` calls. It was originally added to support custom ``__init__`` methods, but you can now use :func:`~hypothesis.stateful.initialize` rules instead. 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/strategies.rst000066400000000000000000000103541354103617500271030ustar00rootroot00000000000000============================= Projects extending Hypothesis ============================= Hypothesis has been eagerly used and extended by the open source community. This page lists extensions and applications; you can find more or newer packages by searching PyPI `by keyword `_ or `filter by classifier `_, or search `libraries.io `_. If there's something missing which you think should be here, let us know! .. note:: Being listed on this page does not imply that the Hypothesis maintainers endorse a package. ------------------- External Strategies ------------------- Some packages provide strategies directly: * :pypi:`hypothesis-fspaths` - strategy to generate filesystem paths. * :pypi:`hypothesis-geojson` - strategy to generate `GeoJson `_. * :pypi:`hs-dbus-signature` - strategy to generate arbitrary `D-Bus signatures `_. * :pypi:`hypothesis_sqlalchemy` - strategies to generate :pypi:`SQLAlchemy` objects. * :pypi:`hypothesis-ros` - strategies to generate messages and parameters for the `Robot Operating System `_. * :pypi:`hypothesis-csv` - strategy to generate CSV files. * :pypi:`hypothesis-networkx` - strategy to generate :pypi:`networkx` graphs. Others provide a function to infer a strategy from some other schema: * :pypi:`hypothesis-jsonschema` - infer strategies from `JSON schemas `_. * :pypi:`lollipop-hypothesis` - infer strategies from :pypi:`lollipop` schemas. * :pypi:`hypothesis-drf` - infer strategies from a :pypi:`djangorestframework` serialiser. * :pypi:`hypothesis-mongoengine` - infer strategies from a :pypi:`mongoengine` model. * :pypi:`hypothesis-pb` - infer strategies from `Protocol Buffer `_ schemas. ----------------- Other Cool Things ----------------- :pypi:`swagger-conformance` is powered by Hypothesis and :pypi:`pyswagger`. 
Based on a `Swagger specification `_, it can build and run an entire test suite to check that the implementation matches the spec. The command-line version can test apps written in any language, simply by passing the file or URL path to the schema to check! `Trio `_ is an async framework with "an obsessive focus on usability and correctness", so naturally it works with Hypothesis! :pypi:`pytest-trio` includes :ref:`a custom hook ` that allows ``@given(...)`` to work with Trio-style async test functions, and :pypi:`hypothesis-trio` includes stateful testing extensions to support concurrent programs. :pypi:`libarchimedes` makes it easy to use Hypothesis in `the Hy language `_, a Lisp embedded in Python. :pypi:`battle_tested` is a fuzzing tool that will show you how your code can fail - by trying all kinds of inputs and reporting whatever happens. :pypi:`pytest-subtesthack` functions as a workaround for :issue:`377`. -------------------- Writing an Extension -------------------- *See* :gh-file:`CONTRIBUTING.rst` *for more information.* New strategies can be added to Hypothesis, or published as an external package on PyPI - either is fine for most strategies. If in doubt, ask! It's generally much easier to get things working outside, because there's more freedom to experiment and fewer requirements in stability and API style. We're happy to review and help with external packages as well as pull requests! If you're thinking about writing an extension, please name it ``hypothesis-{something}`` - a standard prefix makes the community more visible and searching for extensions easier. And make sure you use the ``Framework :: Hypothesis`` trove classifier! On the other hand, being inside gets you access to some deeper implementation features (if you need them) and better long-term guarantees about maintenance. We particularly encourage pull requests for new composable primitives that make implementing other strategies easier, or for widely used types in the standard library. 
Strategies for other things are also welcome; anything with external dependencies just goes in hypothesis.extra. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/support.rst000066400000000000000000000022501354103617500264410ustar00rootroot00000000000000================ Help and Support ================ For questions you are happy to ask in public, the :doc:`Hypothesis community ` is a friendly place where I or others will be more than happy to help you out. You're also welcome to ask questions on Stack Overflow. If you do, please tag them with `'python-hypothesis' `_ so someone sees them. For bugs and enhancements, please file an issue on the :issue:`GitHub issue tracker <>`. Note that as per the :doc:`development policy `, enhancements will probably not get implemented unless you're willing to pay for development or implement them yourself (with assistance from me). Bugs will tend to get fixed reasonably promptly, though it is of course on a best effort basis. To see the versions of Python, optional dependencies, test runners, and operating systems Hypothesis supports (meaning incompatibility is treated as a bug), see :doc:`supported`. If you need to ask questions privately or want more of a guarantee of bugs being fixed promptly, please contact me on hypothesis-support@drmaciver.com to talk about availability of support contracts. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/supported.rst000066400000000000000000000111471354103617500267570ustar00rootroot00000000000000============= Compatibility ============= Hypothesis does its level best to be compatible with everything you could possibly need it to be compatible with. Generally you should just try it and expect it to work. If it doesn't, you can be surprised and check this document for the details. --------------- Python versions --------------- Hypothesis is supported and tested on CPython 2.7 and CPython 3.5+, i.e. 
`all versions of CPython with upstream support `_, Hypothesis also supports the latest PyPy for both Python 2 (until 2020) and Python 3. Hypothesis does not currently work on Jython, though it probably could (:issue:`174`). IronPython might work but hasn't been tested. 32-bit and narrow builds should work, though this is currently only tested on Windows. In general Hypothesis does not officially support anything except the latest patch release of any version of Python it supports. Earlier releases should work and bugs in them will get fixed if reported, but they're not tested in CI and no guarantees are made. ----------------- Operating systems ----------------- In theory Hypothesis should work anywhere that Python does. In practice it is only known to work and regularly tested on OS X, Windows and Linux, and you may experience issues running it elsewhere. If you're using something else and it doesn't work, do get in touch and I'll try to help, but unless you can come up with a way for me to run a CI server on that operating system it probably won't stay fixed due to the inevitable march of time. .. _framework-compatibility: ------------------ Testing frameworks ------------------ In general Hypothesis goes to quite a lot of effort to generate things that look like normal Python test functions that behave as closely to the originals as possible, so it should work sensibly out of the box with every test framework. If your testing relies on doing something other than calling a function and seeing if it raises an exception then it probably *won't* work out of the box. In particular things like tests which return generators and expect you to do something with them (e.g. nose's yield based tests) will not work. Use a decorator or similar to wrap the test to take this form, or ask the framework maintainer to support our :ref:`hooks for inserting such a wrapper later `. 
In terms of what's actually *known* to work: * Hypothesis integrates as smoothly with pytest and unittest as we can make it, and this is verified as part of the CI. * :pypi:`pytest` fixtures work in the usual way for tests that have been decorated with :func:`@given ` - just avoid passing a strategy for each argument that will be supplied by a fixture. However, each fixture will run once for the whole function, not once per example. Decorating a fixture function with :func:`@given ` is meaningless. * The :func:`python:unittest.mock.patch` decorator works with :func:`@given `, but we recommend using it as a context manager within the decorated test to ensure that the mock is per-test-case and avoid poor interactions with Pytest fixtures. * Nose works fine with Hypothesis, and this is tested as part of the CI. ``yield`` based tests simply won't work. * Integration with Django's testing requires use of the :ref:`hypothesis-django` package. The issue is that in Django's tests' normal mode of execution it will reset the database once per test rather than once per example, which is not what you want. * :pypi:`Coverage` works out of the box with Hypothesis; our own test suite has 100% branch coverage. ----------------- Optional Packages ----------------- The supported versions of optional packages, for strategies in ``hypothesis.extra``, are listed in the documentation for that extra. Our general goal is to support all versions that are supported upstream. ------------------------ Regularly verifying this ------------------------ Everything mentioned above as explicitly supported is checked on every commit with `Travis `__, and `Azure Pipelines `__. Our continuous delivery pipeline runs all of these checks before publishing each release, so when we say they're supported we really mean it. 
------------------- Hypothesis versions ------------------- Backwards compatibility is better than backporting fixes, so we use :ref:`semantic versioning ` and only support the most recent version of Hypothesis. See :doc:`support` for more information. hypothesis-hypothesis-python-4.36.2/hypothesis-python/docs/usage.rst000066400000000000000000000054641354103617500260430ustar00rootroot00000000000000===================================== Open Source Projects using Hypothesis ===================================== The following is a non-exhaustive list of open source projects I know are using Hypothesis. If you're aware of any others please add them to the list! The only inclusion criterion right now is that if it's a Python library then it should be available on PyPI. You can find hundreds more from `the Hypothesis page at libraries.io `_, and over a thousand `on GitHub `_. * `aur `_ * `argon2_cffi `_ * `attrs `_ * `axelrod `_ * `bidict `_ * `binaryornot `_ * `brotlipy `_ * :pypi:`chardet` * `cmph-cffi `_ * `cryptography `_ * `dbus-signature-pyparsing `_ * `fastnumbers `_ * `flocker `_ * `flownetpy `_ * `funsize `_ * `fusion-index `_ * `hyper-h2 `_ * `into-dbus-python `_ * `justbases `_ * `justbytes `_ * `loris `_ * `mariadb-dyncol `_ * `mercurial `_ * `natsort `_ * `pretext `_ * `priority `_ * `PyCEbox `_ * `PyPy `_ * `pyrsistent `_ * `python-humble-utils `_ * `pyudev `_ * `qutebrowser `_ * `RubyMarshal `_ * `Segpy `_ * `simoa `_ * `srt `_ * `tchannel `_ * `vdirsyncer `_ * `wcag-contrast-ratio `_ * `yacluster `_ * `yturl `_ hypothesis-hypothesis-python-4.36.2/hypothesis-python/examples/000077500000000000000000000000001354103617500250625ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/examples/README.rst000066400000000000000000000006621354103617500265550ustar00rootroot00000000000000============================ Examples of Hypothesis usage ============================ This is a directory for examples of using Hypothesis that show case 
its features or demonstrate a useful way of testing something. Right now it's a bit small and fairly algorithmically focused. Pull requests to add more examples would be *greatly* appreciated, especially ones using e.g. the Django integration or testing something "Businessy". hypothesis-hypothesis-python-4.36.2/hypothesis-python/examples/test_binary_search.py000066400000000000000000000115141354103617500313060ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """This file demonstrates testing a binary search. It's a useful example because the result of the binary search is so clearly determined by the invariants it must satisfy, so we can simply test for those invariants. It also demonstrates the useful testing technique of testing how the answer should change (or not) in response to movements in the underlying data. """ from __future__ import absolute_import, division, print_function import hypothesis.strategies as st from hypothesis import given def binary_search(ls, v): """Take a list ls and a value v such that ls is sorted and v is comparable with the elements of ls. Return an index i such that 0 <= i <= len(v) with the properties: 1. ls.insert(i, v) is sorted 2. ls.insert(j, v) is not sorted for j < i """ # Without this check we will get an index error on the next line when the # list is empty. 
if not ls: return 0 # Without this check we will miss the case where the insertion point should # be zero: The invariant we maintain in the next section is that lo is # always strictly lower than the insertion point. if v <= ls[0]: return 0 # Invariant: There is no insertion point i with i <= lo lo = 0 # Invariant: There is an insertion point i with i <= hi hi = len(ls) while lo + 1 < hi: mid = (lo + hi) // 2 if v > ls[mid]: # Inserting v anywhere below mid would result in an unsorted list # because it's > the value at mid. Therefore mid is a valid new lo lo = mid # Uncommenting the following lines will cause this to return a valid # insertion point which is not always minimal. # elif v == ls[mid]: # return mid else: # Either v == ls[mid] in which case mid is a valid insertion point # or v < ls[mid], in which case all valid insertion points must be # < hi. Either way, mid is a valid new hi. hi = mid assert lo + 1 == hi # We now know that there is a valid insertion point <= hi and there is no # valid insertion point < hi because hi - 1 is lo. Therefore hi is the # answer we were seeking return hi def is_sorted(ls): """Is this list sorted?""" for i in range(len(ls) - 1): if ls[i] > ls[i + 1]: return False return True Values = st.integers() # We generate arbitrary lists and turn this into generating sorting lists # by just sorting them. SortedLists = st.lists(Values).map(sorted) # We could also do it this way, but that would be a bad idea: # SortedLists = st.lists(Values).filter(is_sorted) # The problem is that Hypothesis will only generate long sorted lists with very # low probability, so we are much better off post-processing values into the # form we want than filtering them out. 
@given(ls=SortedLists, v=Values) def test_insert_is_sorted(ls, v): """We test the first invariant: binary_search should return an index such that inserting the value provided at that index would result in a sorted set.""" ls.insert(binary_search(ls, v), v) assert is_sorted(ls) @given(ls=SortedLists, v=Values) def test_is_minimal(ls, v): """We test the second invariant: binary_search should return an index such that no smaller index is a valid insertion point for v.""" for i in range(binary_search(ls, v)): ls2 = list(ls) ls2.insert(i, v) assert not is_sorted(ls2) @given(ls=SortedLists, v=Values) def test_inserts_into_same_place_twice(ls, v): """In this we test a *consequence* of the second invariant: When we insert a value into a list twice, the insertion point should be the same both times. This is because we know that v is > the previous element and == the next element. In theory if the former passes, this should always pass. In practice, failures are detected by this test with much higher probability because it deliberately puts the data into a shape that is likely to trigger a failure. This is an instance of a good general category of test: Testing how the function moves in responses to changes in the underlying data. """ i = binary_search(ls, v) ls.insert(i, v) assert binary_search(ls, v) == i hypothesis-hypothesis-python-4.36.2/hypothesis-python/examples/test_rle.py000066400000000000000000000070401354103617500272560ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """This example demonstrates testing a run length encoding scheme. That is, we take a sequence and represent it by a shorter sequence where each 'run' of consecutive equal elements is represented as a single element plus a count. So e.g. [1, 1, 1, 1, 2, 1] is represented as [[1, 4], [2, 1], [1, 1]] This demonstrates the useful decode(encode(x)) == x invariant that is often a fruitful source of testing with Hypothesis. It also has an example of testing invariants in response to changes in the underlying data. """ from __future__ import absolute_import, division, print_function import hypothesis.strategies as st from hypothesis import assume, given def run_length_encode(seq): """Encode a sequence as a new run-length encoded sequence.""" if not seq: return [] # By starting off the count at zero we simplify the iteration logic # slightly. result = [[seq[0], 0]] for s in seq: if ( # If you uncomment this line this branch will be skipped and we'll # always append a new run of length 1. Note which tests fail. # False and s == result[-1][0] # Try uncommenting this line and see what problems occur: # and result[-1][-1] < 2 ): result[-1][1] += 1 else: result.append([s, 1]) return result def run_length_decode(seq): """Take a previously encoded sequence and reconstruct the original from it.""" result = [] for s, i in seq: for _ in range(i): result.append(s) return result # We use lists of a type that should have a relatively high duplication rate, # otherwise we'd almost never get any runs. Lists = st.lists(st.integers(0, 10)) @given(Lists) def test_decodes_to_starting_sequence(ls): """If we encode a sequence and then decode the result, we should get the original sequence back. Otherwise we've done something very wrong. 
""" assert run_length_decode(run_length_encode(ls)) == ls @given(Lists, st.data()) def test_duplicating_an_element_does_not_increase_length(ls, data): """The previous test could be passed by simply returning the input sequence so we need something that tests the compression property of our encoding. In this test we deliberately introduce or extend a run and assert that this does not increase the length of our encoding, because they should be part of the same run in the final result. """ # We use assume to get a valid index into the list. We could also have used # e.g. flatmap, but this is relatively straightforward and will tend to # perform better. assume(ls) i = data.draw(st.integers(0, len(ls) - 1)) ls2 = list(ls) # duplicating the value at i right next to it guarantees they are part of # the same run in the resulting compression. ls2.insert(i, ls2[i]) assert len(run_length_encode(ls2)) == len(run_length_encode(ls)) hypothesis-hypothesis-python-4.36.2/hypothesis-python/scripts/000077500000000000000000000000001354103617500247335ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/scripts/basic-test.sh000077500000000000000000000031271354103617500273330ustar00rootroot00000000000000#!/bin/bash set -e -o xtrace HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" cd "$HERE/.." python -c ' import os for k, v in sorted(dict(os.environ).items()): print("%s=%s" % (k, v)) ' pip install . 
PYTEST="python -m pytest -n2" # Run all the no-extra-dependency tests for this version (except slow nocover tests) if [ "$(python -c 'import sys; print(sys.version_info[0] == 2)')" = "True" ] ; then $PYTEST tests/cover tests/pytest tests/py2 else $PYTEST tests/cover tests/pytest tests/py3 fi # Run tests for each extra module while the requirements are installed pip install ".[pytz, dateutil]" $PYTEST tests/datetime/ pip uninstall -y pytz python-dateutil pip install ".[dpcontracts]" $PYTEST tests/dpcontracts/ pip uninstall -y dpcontracts pip install ".[lark]" $PYTEST tests/lark/ pip install lark-parser==0.7.1 $PYTEST tests/lark/ pip uninstall -y lark-parser if [ "$(python -c 'import sys; print(sys.version_info[:2] in ((2, 7), (3, 6)))')" = "False" ] ; then exit 0 fi $PYTEST tests/nocover/ # Run some tests without docstrings or assertions, to catch bugs # like issue #822 in one of the test decorators. See also #1541. PYTHONOPTIMIZE=2 $PYTEST tests/cover/test_testdecorators.py if [ "$(python -c 'import platform; print(platform.python_implementation())')" != "PyPy" ]; then pip install .[django] HYPOTHESIS_DJANGO_USETZ=TRUE python -m tests.django.manage test tests.django HYPOTHESIS_DJANGO_USETZ=FALSE python -m tests.django.manage test tests.django pip uninstall -y django pytz pip install numpy $PYTEST tests/numpy pip install pandas $PYTEST tests/pandas fi hypothesis-hypothesis-python-4.36.2/hypothesis-python/scripts/unicodechecker.py000066400000000000000000000027121354103617500302620ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import inspect import os import sys import warnings from tempfile import mkdtemp import unicodenazi from hypothesis.configuration import set_hypothesis_home_dir from hypothesis.errors import HypothesisDeprecationWarning warnings.filterwarnings("error", category=UnicodeWarning) warnings.filterwarnings("error", category=HypothesisDeprecationWarning) unicodenazi.enable() set_hypothesis_home_dir(mkdtemp()) TESTS = ["test_testdecorators"] sys.path.append(os.path.join("tests", "cover")) def main(): for t in TESTS: module = __import__(t) for k, v in sorted(module.__dict__.items(), key=lambda x: x[0]): if k.startswith("test_") and inspect.isfunction(v): print(k) v() if __name__ == "__main__": main() hypothesis-hypothesis-python-4.36.2/hypothesis-python/scripts/validate_branch_check.py000066400000000000000000000032731354103617500315550ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import json import sys from collections import defaultdict if __name__ == "__main__": with open("branch-check") as i: data = [json.loads(l) for l in i] checks = defaultdict(set) for d in data: checks[d["name"]].add(d["value"]) always_true = [] always_false = [] for c, vs in sorted(checks.items()): if len(vs) < 2: v = list(vs)[0] assert v in (False, True) if v: always_true.append(c) else: always_false.append(c) failure = always_true or always_false if failure: print("Some branches were not properly covered.") print() if always_true: print("The following were always True:") print() for c in always_true: print(" * %s" % (c,)) if always_false: print("The following were always False:") print() for c in always_false: print(" * %s" % (c,)) if failure: sys.exit(1) hypothesis-hypothesis-python-4.36.2/hypothesis-python/setup.cfg000066400000000000000000000001251354103617500250630ustar00rootroot00000000000000[metadata] # This includes the license file in the wheel. license_file = LICENSE.txt hypothesis-hypothesis-python-4.36.2/hypothesis-python/setup.py000066400000000000000000000105621354103617500247620ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os import sys import warnings import setuptools def local_file(name): return os.path.relpath(os.path.join(os.path.dirname(__file__), name)) SOURCE = local_file("src") README = local_file("README.rst") setuptools_version = tuple(map(int, setuptools.__version__.split(".")[:2])) if setuptools_version < (36, 2): # Warning only - very bad if uploading bdist but fine if installing sdist. warnings.warn( "This version of setuptools is too old to correctly store " "conditional dependencies in binary wheels. For more info, see: " "https://hynek.me/articles/conditional-python-dependencies/" ) # Assignment to placate pyflakes. The actual version is from the exec that # follows. __version__ = None with open(local_file("src/hypothesis/version.py")) as o: exec(o.read()) assert __version__ is not None extras = { "pytz": ["pytz>=2014.1"], "dateutil": ["python-dateutil>=1.4"], "lark": ["lark-parser>=0.6.5"], "numpy": ["numpy>=1.9.0"], "pandas": ["pandas>=0.19"], "pytest": ["pytest>=3.0"], "dpcontracts": ["dpcontracts>=0.4"], # We only support Django versions with upstream support - see # https://www.djangoproject.com/download/#supported-versions "django": ["pytz", "django>=1.11"], } extras["all"] = sorted(sum(extras.values(), [])) install_requires = ["attrs>=16.0.0"] # Using an environment marker on enum34 makes the dependency condition # independent of the build environemnt, which is important for wheels. # https://www.python.org/dev/peps/pep-0345/#environment-markers if sys.version_info[0] < 3 and setuptools_version < (8, 0): # Except really old systems, where we give up and install unconditionally install_requires.append("enum34") else: install_requires.append('enum34; python_version=="2.7"') setuptools.setup( name="hypothesis", version=__version__, author="David R. 
MacIver", author_email="david@drmaciver.com", packages=setuptools.find_packages(SOURCE), package_dir={"": SOURCE}, package_data={"hypothesis": ["py.typed", "vendor/tlds-alpha-by-domain.txt"]}, url="https://github.com/HypothesisWorks/hypothesis/tree/master/hypothesis-python", project_urls={ "Website": "https://hypothesis.works", "Documentation": "https://hypothesis.readthedocs.io", "Issues": "https://github.com/HypothesisWorks/hypothesis/issues", }, license="MPL v2", description="A library for property based testing", zip_safe=False, extras_require=extras, install_requires=install_requires, python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*", classifiers=[ "Development Status :: 5 - Production/Stable", "Framework :: Hypothesis", "Framework :: Pytest", "Intended Audience :: Developers", "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)", "Operating System :: Unix", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Testing", ], entry_points={"pytest11": ["hypothesispytest = hypothesis.extra.pytestplugin"]}, long_description=open(README).read(), long_description_content_type="text/x-rst", keywords="python testing fuzzing property-based-testing", ) 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/000077500000000000000000000000001354103617500240335ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/000077500000000000000000000000001354103617500262325ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/__init__.py000066400000000000000000000031661354103617500303510ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """Hypothesis is a library for writing unit tests which are parametrized by some source of data. It verifies your code against a wide range of input and minimizes any failing examples it finds. 
""" from __future__ import absolute_import, division, print_function from hypothesis._settings import ( HealthCheck, Phase, PrintSettings, Verbosity, settings, unlimited, ) from hypothesis.control import assume, event, note, reject from hypothesis.core import example, find, given, reproduce_failure, seed from hypothesis.internal.entropy import register_random from hypothesis.utils.conventions import infer from hypothesis.version import __version__, __version_info__ __all__ = [ "settings", "Verbosity", "HealthCheck", "Phase", "PrintSettings", "assume", "reject", "seed", "given", "unlimited", "reproduce_failure", "find", "example", "note", "event", "infer", "register_random", "__version__", "__version_info__", ] hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/_settings.py000066400000000000000000000644621354103617500306170ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """A module controlling settings for Hypothesis to use in falsification. Either an explicit settings object can be used or the default object on this module can be modified. 
""" from __future__ import absolute_import, division, print_function import contextlib import datetime import inspect import threading import warnings from enum import Enum, IntEnum, unique import attr from hypothesis.errors import ( HypothesisDeprecationWarning, InvalidArgument, InvalidState, ) from hypothesis.internal.compat import integer_types, quiet_raise, string_types from hypothesis.internal.reflection import get_pretty_function_description, proxies from hypothesis.internal.validation import check_type, try_convert from hypothesis.utils.conventions import UniqueIdentifier, not_set from hypothesis.utils.dynamicvariables import DynamicVariable if False: from typing import Any, Dict, List # noqa __all__ = ["settings"] unlimited = UniqueIdentifier("unlimited") all_settings = {} # type: Dict[str, Setting] class settingsProperty(object): def __init__(self, name, show_default): self.name = name self.show_default = show_default def __get__(self, obj, type=None): if obj is None: return self else: try: result = obj.__dict__[self.name] # This is a gross hack, but it preserves the old behaviour that # you can change the storage directory and it will be reflected # in the default database. 
if self.name == "database" and result is not_set: from hypothesis.database import ExampleDatabase result = ExampleDatabase(not_set) return result except KeyError: raise AttributeError(self.name) def __set__(self, obj, value): obj.__dict__[self.name] = value def __delete__(self, obj): raise AttributeError("Cannot delete attribute %s" % (self.name,)) @property def __doc__(self): description = all_settings[self.name].description deprecation_message = all_settings[self.name].deprecation_message default = ( repr(getattr(settings.default, self.name)) if self.show_default else "(dynamically calculated)" ) return "\n\n".join( [ description, "default value: ``%s``" % (default,), (deprecation_message or "").strip(), ] ).strip() default_variable = DynamicVariable(None) class settingsMeta(type): def __init__(self, *args, **kwargs): super(settingsMeta, self).__init__(*args, **kwargs) @property def default(self): v = default_variable.value if v is not None: return v if hasattr(settings, "_current_profile"): settings.load_profile(settings._current_profile) assert default_variable.value is not None return default_variable.value def _assign_default_internal(self, value): default_variable.value = value def __setattr__(self, name, value): if name == "default": raise AttributeError( "Cannot assign to the property settings.default - " "consider using settings.load_profile instead." ) elif not (isinstance(value, settingsProperty) or name.startswith("_")): raise AttributeError( "Cannot assign hypothesis.settings.%s=%r - the settings " "class is immutable. You can change the global default " "settings with settings.load_profile, or use @settings(...) " "to decorate your test instead." % (name, value) ) return type.__setattr__(self, name, value) class settings(settingsMeta("settings", (object,), {})): # type: ignore """A settings object controls a variety of parameters that are used in falsification. 
These may control both the falsification strategy and the details of the data that is generated. Default values are picked up from the settings.default object and changes made there will be picked up in newly created settings. """ _WHITELISTED_REAL_PROPERTIES = ["_construction_complete", "storage"] __definitions_are_locked = False _profiles = {} # type: dict __module__ = "hypothesis" def __getattr__(self, name): if name in all_settings: return all_settings[name].default else: raise AttributeError("settings has no attribute %s" % (name,)) def __init__(self, parent=None, **kwargs): # type: (settings, **Any) -> None if kwargs.get("derandomize"): if kwargs.get("database") is not None: raise InvalidArgument( "derandomize=True implies database=None, so passing " "database=%r too is invalid." % (kwargs["database"],) ) kwargs["database"] = None self._construction_complete = False deprecations = [] defaults = parent or settings.default if defaults is not None: for setting in all_settings.values(): if kwargs.get(setting.name, not_set) is not_set: kwargs[setting.name] = getattr(defaults, setting.name) else: if setting.validator: kwargs[setting.name] = setting.validator(kwargs[setting.name]) if setting.deprecation_message is not None: deprecations.append(setting) for name, value in kwargs.items(): if name not in all_settings: raise InvalidArgument( "Invalid argument: %r is not a valid setting" % (name,) ) setattr(self, name, value) self.storage = threading.local() self._construction_complete = True for d in deprecations: note_deprecation( d.deprecation_message, since=d.deprecated_since, verbosity=self.verbosity, ) def __call__(self, test): """Make the settings object (self) an attribute of the test. The settings are later discovered by looking them up on the test itself. """ if not callable(test): raise InvalidArgument( "settings objects can be called as a decorator with @given, " "but decorated test=%r is not callable." 
% (test,) ) if inspect.isclass(test): from hypothesis.stateful import GenericStateMachine if issubclass(test, GenericStateMachine): attr_name = "_hypothesis_internal_settings_applied" if getattr(test, attr_name, False): raise InvalidArgument( "Applying the @settings decorator twice would " "overwrite the first version; merge their arguments " "instead." ) setattr(test, attr_name, True) test.TestCase.settings = self return test else: raise InvalidArgument( "@settings(...) can only be used as a decorator on " "functions, or on subclasses of GenericStateMachine." ) if hasattr(test, "_hypothesis_internal_settings_applied"): # Can't use _hypothesis_internal_use_settings as an indicator that # @settings was applied, because @given also assigns that attribute. raise InvalidArgument( "%s has already been decorated with a settings object." "\n Previous: %r\n This: %r" % ( get_pretty_function_description(test), test._hypothesis_internal_use_settings, self, ) ) test._hypothesis_internal_use_settings = self test._hypothesis_internal_settings_applied = True if getattr(test, "is_hypothesis_test", False): return test @proxies(test) def new_test(*args, **kwargs): """@given has not been applied to `test`, so we replace it with this wrapper so that using *only* @settings is an error. We then attach the actual test as an attribute of this function, so that we can unwrap it if @given is applied after the settings decorator. """ raise InvalidArgument( "Using `@settings` on a test without `@given` is completely pointless." ) new_test._hypothesis_internal_test_function_without_warning = test new_test._hypothesis_internal_use_settings = self new_test._hypothesis_internal_settings_applied = True return new_test @classmethod def _define_setting( cls, name, description, default, options=None, validator=None, show_default=True, deprecation_message=None, deprecated_since=None, ): """Add a new setting. - name is the name of the property that will be used to access the setting. 
This must be a valid python identifier. - description will appear in the property's docstring - default is the default value. This may be a zero argument function in which case it is evaluated and its result is stored the first time it is accessed on any given settings object. """ if settings.__definitions_are_locked: raise InvalidState( "settings have been locked and may no longer be defined." ) if options is not None: options = tuple(options) assert default in options else: assert validator is not None all_settings[name] = Setting( name=name, description=description.strip(), default=default, options=options, validator=validator, deprecation_message=deprecation_message, deprecated_since=deprecated_since, ) setattr(settings, name, settingsProperty(name, show_default)) @classmethod def lock_further_definitions(cls): settings.__definitions_are_locked = True def __setattr__(self, name, value): if name in settings._WHITELISTED_REAL_PROPERTIES: return object.__setattr__(self, name, value) elif name in all_settings: if self._construction_complete: raise AttributeError( "settings objects are immutable and may not be assigned to" " after construction." ) else: setting = all_settings[name] if setting.options is not None and value not in setting.options: raise InvalidArgument( "Invalid %s, %r. Valid options: %r" % (name, value, setting.options) ) return object.__setattr__(self, name, value) else: raise AttributeError("No such setting %s" % (name,)) def __repr__(self): bits = [] for name, setting in all_settings.items(): value = getattr(self, name) # The only settings that are not shown are those that are # deprecated and left at their default values. 
if value != setting.default or not setting.deprecation_message: bits.append("%s=%r" % (name, value)) return "settings(%s)" % ", ".join(sorted(bits)) def show_changed(self): bits = [] for name, setting in all_settings.items(): value = getattr(self, name) if value != setting.default: bits.append("%s=%r" % (name, value)) return ", ".join(sorted(bits, key=len)) @staticmethod def register_profile(name, parent=None, **kwargs): # type: (str, settings, **Any) -> None """Registers a collection of values to be used as a settings profile. Settings profiles can be loaded by name - for example, you might create a 'fast' profile which runs fewer examples, keep the 'default' profile, and create a 'ci' profile that increases the number of examples and uses a different database to store failures. The arguments to this method are exactly as for :class:`~hypothesis.settings`: optional ``parent`` settings, and keyword arguments for each setting that will be set differently to parent (or settings.default, if parent is None). """ check_type(string_types, name, "name") settings._profiles[name] = settings(parent=parent, **kwargs) @staticmethod def get_profile(name): # type: (str) -> settings """Return the profile with the given name.""" check_type(string_types, name, "name") try: return settings._profiles[name] except KeyError: raise InvalidArgument("Profile %r is not registered" % (name,)) @staticmethod def load_profile(name): # type: (str) -> None """Loads in the settings defined in the profile provided. If the profile does not exist, InvalidArgument will be raised. Any setting not defined in the profile will be the library defined default for that setting. 
""" check_type(string_types, name, "name") settings._current_profile = name settings._assign_default_internal(settings.get_profile(name)) @contextlib.contextmanager def local_settings(s): default_context_manager = default_variable.with_value(s) with default_context_manager: yield s @attr.s() class Setting(object): name = attr.ib() description = attr.ib() default = attr.ib() options = attr.ib() validator = attr.ib() deprecation_message = attr.ib() deprecated_since = attr.ib() def _ensure_positive_int(x, name, since, min_value=0): if not isinstance(x, integer_types): note_deprecation( "Passing non-integer %s=%r is deprecated" % (name, x), since=since ) x = try_convert(int, x, name) if x < min_value: raise InvalidArgument("%s=%r must be at least %r." % (name, x, min_value)) return x def _max_examples_validator(x): x = _ensure_positive_int(x, "max_examples", since="2019-03-06", min_value=0) if x == 0: note_deprecation( "max_examples=%r should be at least one. You can disable example " "generation with the `phases` setting instead." % (x,), since="2019-03-06", ) return x settings._define_setting( "max_examples", default=100, validator=_max_examples_validator, description=""" Once this many satisfying examples have been considered without finding any counter-example, falsification will terminate. The default value is chosen to suit a workflow where the test will be part of a suite that is regularly executed locally or on a CI server, balancing total running time against the chance of missing a bug. If you are writing one-off tests, running tens of thousands of examples is quite reasonable as Hypothesis may miss uncommon bugs with default settings. For very complex code, we have observed Hypothesis finding novel bugs after *several million* examples while testing :pypi:`SymPy`. 
""", ) settings._define_setting( "buffer_size", default=not_set, validator=lambda x: _ensure_positive_int(x, "buffer_size", since="2019-03-06"), description="The buffer_size setting has been deprecated and no longer does anything.", deprecation_message="The buffer_size setting can safely be removed with no effect.", deprecated_since="2019-07-03", ) settings._define_setting( "timeout", default=not_set, description="The timeout setting has been deprecated and no longer does anything.", deprecation_message="The timeout setting can safely be removed with no effect.", deprecated_since="2017-11-02", options=(not_set, unlimited), ) def _derandomize_validator(x): if not isinstance(x, bool): note_deprecation("derandomize=%r should be a bool." % (x,), since="2019-03-06") return bool(x) settings._define_setting( "derandomize", default=False, validator=_derandomize_validator, description=""" If this is True then hypothesis will run in deterministic mode where each falsification uses a random number generator that is seeded based on the hypothesis to falsify, which will be consistent across multiple runs. This has the advantage that it will eliminate any randomness from your tests, which may be preferable for some situations. It does have the disadvantage of making your tests less likely to find novel breakages. """, ) def _validate_database(db): from hypothesis.database import ExampleDatabase if db is None or isinstance(db, ExampleDatabase): return db raise InvalidArgument( "Arguments to the database setting must be None or an instance of " "ExampleDatabase. Try passing database=ExampleDatabase(%r), or " "construct and use one of the specific subclasses in " "hypothesis.database" % (db,) ) settings._define_setting( "database", default=not_set, show_default=False, description=""" An instance of hypothesis.database.ExampleDatabase that will be used to save examples to and load previous examples from. 
May be ``None`` in which case no storage will be used, ``":memory:"`` for an in-memory database, or any path for a directory-based example database. """, validator=_validate_database, ) @unique class Phase(IntEnum): explicit = 0 reuse = 1 generate = 2 shrink = 3 def __repr__(self): return "Phase.%s" % (self.name,) @unique class HealthCheck(Enum): """Arguments for :attr:`~hypothesis.settings.suppress_health_check`. Each member of this enum is a type of health check to suppress. """ def __repr__(self): return "%s.%s" % (self.__class__.__name__, self.name) @classmethod def all(cls): # type: () -> List[HealthCheck] deprecated = [HealthCheck.hung_test] return [x for x in list(HealthCheck) if x not in deprecated] data_too_large = 1 """Check for when the typical size of the examples you are generating exceeds the maximum allowed size too often.""" filter_too_much = 2 """Check for when the test is filtering out too many examples, either through use of :func:`~hypothesis.assume()` or :ref:`filter() `, or occasionally for Hypothesis internal reasons.""" too_slow = 3 """Check for when your data generation is extremely slow and likely to hurt testing.""" return_value = 5 """Checks if your tests return a non-None value (which will be ignored and is unlikely to do what you want).""" hung_test = 6 """This health check is deprecated and no longer has any effect. You can use the ``max_examples`` and ``deadline`` settings together to cap the total runtime of your tests, rather than the previous fixed limit.""" large_base_example = 7 """Checks if the natural example to shrink towards is very large.""" not_a_test_method = 8 """Checks if :func:`@given ` has been applied to a method defined by :class:`python:unittest.TestCase` (i.e. 
not a test).""" @unique class Verbosity(IntEnum): quiet = 0 normal = 1 verbose = 2 debug = 3 def __repr__(self): return "Verbosity.%s" % (self.name,) settings._define_setting( "verbosity", options=tuple(Verbosity), default=Verbosity.normal, description="Control the verbosity level of Hypothesis messages", ) def _validate_phases(phases): if phases is None: phases = tuple(Phase) note_deprecation("Use phases=%r, not None." % (phases,), since="2019-08-05") phases = tuple(phases) for a in phases: if not isinstance(a, Phase): raise InvalidArgument("%r is not a valid phase" % (a,)) return tuple(p for p in list(Phase) if p in phases) settings._define_setting( "phases", default=tuple(Phase), description=( "Control which phases should be run. " "See :ref:`the full documentation for more details `" ), validator=_validate_phases, ) settings._define_setting( name="stateful_step_count", default=50, validator=lambda x: _ensure_positive_int(x, "stateful_step_count", "2019-03-06"), description=""" Number of steps to run a stateful program for before giving up on it breaking. """, ) settings._define_setting( name="report_multiple_bugs", default=True, options=(True, False), description=""" Because Hypothesis runs the test many times, it can sometimes find multiple bugs in a single run. Reporting all of them at once is usually very useful, but replacing the exceptions can occasionally clash with debuggers. If disabled, only the exception with the smallest minimal example is raised. """, ) def validate_health_check_suppressions(suppressions): suppressions = try_convert(list, suppressions, "suppress_health_check") for s in suppressions: if not isinstance(s, HealthCheck): raise InvalidArgument( "Non-HealthCheck value %r of type %s is invalid in suppress_health_check." 
% (s, type(s).__name__) ) if s is HealthCheck.hung_test: note_deprecation( "HealthCheck.hung_test is deprecated and has no " "effect, as we no longer run this health check.", since="2019-01-24", ) return suppressions settings._define_setting( "suppress_health_check", default=(), description="""A list of :class:`~hypothesis.HealthCheck` items to disable.""", validator=validate_health_check_suppressions, ) class duration(datetime.timedelta): """A timedelta specifically measured in milliseconds.""" def __repr__(self): ms = self.total_seconds() * 1000 return "timedelta(milliseconds=%r)" % (int(ms) if ms == int(ms) else ms,) def _validate_deadline(x): if isinstance(x, bool): note_deprecation( "The deadline=%r must be a duration in milliseconds, or None to disable." " Boolean deadlines are treated as ints, and deprecated." % (x,), since="2019-03-06", ) if x is None: return x if isinstance(x, integer_types + (float,)): try: x = duration(milliseconds=x) except OverflowError: quiet_raise( InvalidArgument( "deadline=%r is invalid, because it is too large to represent " "as a timedelta. Use deadline=None to disable deadlines." % (x,) ) ) if isinstance(x, datetime.timedelta): if x <= datetime.timedelta(0): raise InvalidArgument( "deadline=%r is invalid, because it is impossible to meet a " "deadline <= 0. Use deadline=None to disable deadlines." % (x,) ) return duration(seconds=x.total_seconds()) raise InvalidArgument( "deadline=%r (type %s) must be a timedelta object, an integer or float number of milliseconds, " "or None to disable the per-test-case deadline." % (x, type(x).__name__) ) settings._define_setting( "deadline", default=duration(milliseconds=200), validator=_validate_deadline, description=u""" If set, a duration (as timedelta, or integer or float number of milliseconds) that each individual example (i.e. each time your test function is called, not the whole decorated test) within a test is not allowed to exceed. 
Tests which take longer than that may be converted into errors (but will not necessarily be if close to the deadline, to allow some variability in test run time). Set this to None to disable this behaviour entirely. """, ) class PrintSettings(Enum): """Flags to determine whether or not to print a detailed example blob to use with :func:`~hypothesis.reproduce_failure` for failing test cases.""" NEVER = 0 """Never print a blob.""" INFER = 1 """This option is deprecated and will be treated as equivalent to ALWAYS.""" ALWAYS = 2 """Always print a blob on failure.""" def __repr__(self): return "PrintSettings.%s" % (self.name,) def _validate_print_blob(value): if isinstance(value, PrintSettings): replacement = value != PrintSettings.NEVER note_deprecation( "Setting print_blob=%r is deprecated and will become an error " "in a future version of Hypothesis. Use print_blob=%r instead." % (value, replacement), since="2018-09-30", ) return replacement check_type(bool, value, "print_blob") return value settings._define_setting( "print_blob", default=False, description=""" If set to True, Hypothesis will print code for failing examples that can be used with :func:`@reproduce_failure ` to reproduce the failing example. 
""", validator=_validate_print_blob, ) settings.lock_further_definitions() def note_deprecation(message, since, verbosity=None): # type: (str, str, Verbosity) -> None if verbosity is None: verbosity = settings.default.verbosity assert verbosity is not None if since != "RELEASEDAY": date = datetime.datetime.strptime(since, "%Y-%m-%d").date() assert datetime.date(2016, 1, 1) <= date warning = HypothesisDeprecationWarning(message) if verbosity > Verbosity.quiet: warnings.warn(warning, stacklevel=2) settings.register_profile("default", settings()) settings.load_profile("default") assert settings.default is not None hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/_strategies.py000066400000000000000000002572651354103617500311360ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import datetime as dt import enum import math import operator import string import sys from decimal import Context, Decimal, localcontext from fractions import Fraction from functools import reduce from inspect import isabstract, isclass from uuid import UUID import attr from hypothesis._settings import note_deprecation from hypothesis.control import cleanup, note, reject from hypothesis.errors import InvalidArgument, ResolutionFailed from hypothesis.internal.cache import LRUReusedCache from hypothesis.internal.cathetus import cathetus from hypothesis.internal.charmap import as_general_categories from hypothesis.internal.compat import ( ceil, floor, gcd, get_type_hints, getfullargspec, hrange, implements_iterator, string_types, typing_root_type, ) from hypothesis.internal.conjecture.utils import ( calc_label_from_cls, check_sample, integer_range, ) from hypothesis.internal.entropy import get_seeder_and_restorer from hypothesis.internal.floats import ( count_between_floats, float_of, float_to_int, int_to_float, is_negative, next_down, next_up, ) from hypothesis.internal.reflection import ( define_function_signature, is_typed_named_tuple, nicerepr, proxies, required_args, ) from hypothesis.internal.validation import ( check_type, check_valid_bound, check_valid_integer, check_valid_interval, check_valid_magnitude, check_valid_size, check_valid_sizes, try_convert, ) from hypothesis.searchstrategy import SearchStrategy, check_strategy from hypothesis.searchstrategy.collections import ( FixedKeysDictStrategy, ListStrategy, TupleStrategy, UniqueListStrategy, UniqueSampledListStrategy, ) from hypothesis.searchstrategy.datetime import ( DateStrategy, DatetimeStrategy, TimedeltaStrategy, ) from hypothesis.searchstrategy.deferred import DeferredStrategy from hypothesis.searchstrategy.functions import FunctionStrategy from hypothesis.searchstrategy.lazy import LazyStrategy from 
hypothesis.searchstrategy.misc import ( BoolStrategy, JustStrategy, SampledFromStrategy, ) from hypothesis.searchstrategy.numbers import ( BoundedIntStrategy, FixedBoundedFloatStrategy, FloatStrategy, WideRangeIntStrategy, ) from hypothesis.searchstrategy.recursive import RecursiveStrategy from hypothesis.searchstrategy.shared import SharedStrategy from hypothesis.searchstrategy.strategies import OneOfStrategy from hypothesis.searchstrategy.strings import ( BinaryStringStrategy, FixedSizeBytes, OneCharStringStrategy, StringStrategy, ) from hypothesis.types import RandomWithSeed from hypothesis.utils.conventions import infer, not_set typing = None # type: Union[None, ModuleType] try: import typing as typing_module typing = typing_module except ImportError: pass try: import numpy except ImportError: numpy = None if False: import random # noqa from types import ModuleType # noqa from typing import Any, Dict, Union, Sequence, Callable, Pattern # noqa from typing import TypeVar, Tuple, List, Set, FrozenSet, overload # noqa from typing import Type, Text, AnyStr, Optional # noqa from hypothesis.utils.conventions import InferType # noqa from hypothesis.searchstrategy.strategies import T, Ex # noqa K, V = TypeVar["K"], TypeVar["V"] # See https://github.com/python/mypy/issues/3186 - numbers.Real is wrong! 
Real = Union[int, float, Fraction, Decimal] else: def overload(f): return f _strategies = set() class FloatKey(object): def __init__(self, f): self.value = float_to_int(f) def __eq__(self, other): return isinstance(other, FloatKey) and (other.value == self.value) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self.value) def convert_value(v): if isinstance(v, float): return FloatKey(v) return (type(v), v) STRATEGY_CACHE = LRUReusedCache(1024) def cacheable(fn): # type: (T) -> T @proxies(fn) def cached_strategy(*args, **kwargs): try: kwargs_cache_key = {(k, convert_value(v)) for k, v in kwargs.items()} except TypeError: return fn(*args, **kwargs) cache_key = (fn, tuple(map(convert_value, args)), frozenset(kwargs_cache_key)) try: if cache_key in STRATEGY_CACHE: return STRATEGY_CACHE[cache_key] except TypeError: return fn(*args, **kwargs) else: result = fn(*args, **kwargs) if not isinstance(result, SearchStrategy) or result.is_cacheable: STRATEGY_CACHE[cache_key] = result return result cached_strategy.__clear_cache = STRATEGY_CACHE.clear return cached_strategy def base_defines_strategy(force_reusable): # type: (bool) -> Callable[[T], T] """Returns a decorator for strategy functions. If force_reusable is True, the generated values are assumed to be reusable, i.e. immutable and safe to cache, across multiple test invocations. 
""" def decorator(strategy_definition): """A decorator that registers the function as a strategy and makes it lazily evaluated.""" _strategies.add(strategy_definition.__name__) @proxies(strategy_definition) def accept(*args, **kwargs): result = LazyStrategy(strategy_definition, args, kwargs) if force_reusable: result.force_has_reusable_values = True assert result.has_reusable_values return result accept.is_hypothesis_strategy_function = True return accept return decorator defines_strategy = base_defines_strategy(False) defines_strategy_with_reusable_values = base_defines_strategy(True) class Nothing(SearchStrategy): def calc_is_empty(self, recur): return True def do_draw(self, data): # This method should never be called because draw() will mark the # data as invalid immediately because is_empty is True. assert False # pragma: no cover def calc_has_reusable_values(self, recur): return True def __repr__(self): return "nothing()" def map(self, f): return self def filter(self, f): return self def flatmap(self, f): return self NOTHING = Nothing() @cacheable def nothing(): # type: () -> SearchStrategy """This strategy never successfully draws a value and will always reject on an attempt to draw. Examples from this strategy do not shrink (because there are none). """ return NOTHING def just(value): # type: (T) -> SearchStrategy[T] """Return a strategy which only generates ``value``. Note: ``value`` is not copied. Be wary of using mutable values. If ``value`` is the result of a callable, you can use :func:`builds(callable) ` instead of ``just(callable())`` to get a fresh value each time. Examples from this strategy do not shrink (because there is only one). """ return JustStrategy(value) @defines_strategy_with_reusable_values def none(): # type: () -> SearchStrategy[None] """Return a strategy which only generates None. Examples from this strategy do not shrink (because there is only one). 
""" return just(None) @overload def one_of(args): # type: (Sequence[SearchStrategy[Any]]) -> SearchStrategy[Any] pass # pragma: no cover @overload def one_of(*args): # type: (SearchStrategy[Any]) -> SearchStrategy[Any] pass # pragma: no cover def one_of(*args): # Mypy workaround alert: Any is too loose above; the return paramater # should be the union of the input parameters. Unfortunately, Mypy <=0.600 # raises errors due to incompatible inputs instead. See #1270 for links. # v0.610 doesn't error; it gets inference wrong for 2+ arguments instead. """Return a strategy which generates values from any of the argument strategies. This may be called with one iterable argument instead of multiple strategy arguments, in which case ``one_of(x)`` and ``one_of(*x)`` are equivalent. Examples from this strategy will generally shrink to ones that come from strategies earlier in the list, then shrink according to behaviour of the strategy that produced them. In order to get good shrinking behaviour, try to put simpler strategies first. e.g. ``one_of(none(), text())`` is better than ``one_of(text(), none())``. This is especially important when using recursive strategies. e.g. ``x = st.deferred(lambda: st.none() | st.tuples(x, x))`` will shrink well, but ``x = st.deferred(lambda: st.tuples(x, x) | st.none())`` will shrink very badly indeed. """ if len(args) == 1 and not isinstance(args[0], SearchStrategy): try: args = tuple(args[0]) except TypeError: pass return OneOfStrategy(args) @cacheable @defines_strategy_with_reusable_values def integers(min_value=None, max_value=None): # type: (Real, Real) -> SearchStrategy[int] """Returns a strategy which generates integers; in Python 2 these may be ints or longs. If min_value is not None then all values will be >= min_value. If max_value is not None then all values will be <= max_value Examples from this strategy will shrink towards zero, and negative values will also shrink towards positive (i.e. -n may be replaced by +n). 
""" check_valid_bound(min_value, "min_value") check_valid_bound(max_value, "max_value") check_valid_interval(min_value, max_value, "min_value", "max_value") min_int_value = None if min_value is None else ceil(min_value) max_int_value = None if max_value is None else floor(max_value) if min_value != min_int_value: note_deprecation( "min_value=%r of type %r cannot be exactly represented as an " "integer, which will be an error in a future version. " "Use %r instead." % (min_value, type(min_value), min_int_value), since="2018-10-10", ) if max_value != max_int_value: note_deprecation( "max_value=%r of type %r cannot be exactly represented as an " "integer, which will be an error in a future version. " "Use %r instead." % (max_value, type(max_value), max_int_value), since="2018-10-10", ) if ( min_int_value is not None and max_int_value is not None and min_int_value > max_int_value ): raise InvalidArgument( "No integers between min_value=%r and " "max_value=%r" % (min_value, max_value) ) if min_int_value is None: if max_int_value is None: return WideRangeIntStrategy() else: if max_int_value > 0: return WideRangeIntStrategy().filter(lambda x: x <= max_int_value) return WideRangeIntStrategy().map(lambda x: max_int_value - abs(x)) else: if max_int_value is None: if min_int_value < 0: return WideRangeIntStrategy().filter(lambda x: x >= min_int_value) return WideRangeIntStrategy().map(lambda x: min_int_value + abs(x)) else: assert min_int_value <= max_int_value if min_int_value == max_int_value: return just(min_int_value) elif min_int_value >= 0: return BoundedIntStrategy(min_int_value, max_int_value) elif max_int_value <= 0: return BoundedIntStrategy(-max_int_value, -min_int_value).map( lambda t: -t ) else: return integers(min_value=0, max_value=max_int_value) | integers( min_value=min_int_value, max_value=0 ) @cacheable @defines_strategy def booleans(): # type: () -> SearchStrategy[bool] """Returns a strategy which generates instances of :class:`python:bool`. 
@cacheable
@defines_strategy_with_reusable_values
def floats(
    min_value=None,  # type: Real
    max_value=None,  # type: Real
    allow_nan=None,  # type: bool
    allow_infinity=None,  # type: bool
    width=64,  # type: int
    exclude_min=False,  # type: bool
    exclude_max=False,  # type: bool
):
    # type: (...) -> SearchStrategy[float]
    """Returns a strategy which generates floats.

    - If min_value is not None, all values will be >= min_value.
    - If max_value is not None, all values will be <= max_value.
    - If min_value or max_value is not None, it is an error to enable
      allow_nan.
    - If both min_value and max_value are not None, it is an error to enable
      allow_infinity.

    Where not explicitly ruled out by the bounds, all of infinity, -infinity
    and NaN are possible values generated by this strategy.

    The width argument specifies the maximum number of bits of precision
    required to represent the generated float. Valid values are 16, 32, or
    64. Passing ``width=32`` will still use the builtin 64-bit ``float``
    class, but always for values which can be exactly represented as a 32-bit
    float. Half-precision floats (``width=16``) are only supported on Python
    3.6, or if :pypi:`Numpy` is installed.

    The exclude_min and exclude_max arguments can be used to generate numbers
    from open or half-open intervals, by excluding the respective endpoints.
    Attempting to exclude an endpoint which is None will raise an error; use
    ``allow_infinity=False`` to generate finite floats. You can however use
    e.g. ``min_value=float("-inf"), exclude_min=True`` to exclude only one
    infinite endpoint.

    Examples from this strategy have a complicated and hard to explain
    shrinking behaviour, but it tries to improve "human readability". Finite
    numbers will be preferred to infinity and infinity will be preferred to
    NaN.
    """
    check_type(bool, exclude_min, "exclude_min")
    check_type(bool, exclude_max, "exclude_max")

    if allow_nan is None:
        # NaN is compatible only with a fully unbounded strategy.
        allow_nan = bool(min_value is None and max_value is None)
    elif allow_nan:
        if min_value is not None or max_value is not None:
            raise InvalidArgument(
                "Cannot have allow_nan=%r, with min_value or max_value" % (allow_nan)
            )

    if width not in (16, 32, 64):
        raise InvalidArgument(
            "Got width=%r, but the only valid values are the integers 16, "
            "32, and 64." % (width,)
        )
    if width == 16 and sys.version_info[:2] < (3, 6) and numpy is None:
        raise InvalidArgument(  # pragma: no cover
            "width=16 requires either Numpy, or Python >= 3.6"
        )

    check_valid_bound(min_value, "min_value")
    check_valid_bound(max_value, "max_value")

    # Keep the originals for error messages; bounds themselves are snapped
    # to the nearest representable float of the requested width.
    min_arg, max_arg = min_value, max_value
    if min_value is not None:
        min_value = float_of(min_value, width)
        assert isinstance(min_value, float)
    if max_value is not None:
        max_value = float_of(max_value, width)
        assert isinstance(max_value, float)

    if min_value != min_arg:
        note_deprecation(
            "min_value=%r cannot be exactly represented as a float of width "
            "%d, which will be an error in a future version. Use min_value=%r "
            "instead." % (min_arg, width, min_value),
            since="2018-10-10",
        )
    if max_value != max_arg:
        note_deprecation(
            "max_value=%r cannot be exactly represented as a float of width "
            "%d, which will be an error in a future version. Use max_value=%r "
            "instead" % (max_arg, width, max_value),
            since="2018-10-10",
        )

    if exclude_min and (min_value is None or min_value == float("inf")):
        raise InvalidArgument("Cannot exclude min_value=%r" % (min_value,))
    if exclude_max and (max_value is None or max_value == float("-inf")):
        raise InvalidArgument("Cannot exclude max_value=%r" % (max_value,))

    # Move each excluded (or inward-rounded) bound one representable step
    # into the interval.
    if min_value is not None and (
        exclude_min or (min_arg is not None and min_value < min_arg)
    ):
        min_value = next_up(min_value, width)
        assert min_value > min_arg or min_value == min_arg == 0  # type: ignore
    if max_value is not None and (
        exclude_max or (max_arg is not None and max_value > max_arg)
    ):
        max_value = next_down(max_value, width)
        assert max_value < max_arg or max_value == max_arg == 0  # type: ignore

    # Infinite bounds are equivalent to no bound at all.
    if min_value == float(u"-inf"):
        min_value = None
    if max_value == float(u"inf"):
        max_value = None

    # min_value=0.0, max_value=-0.0 is an empty interval once signed zeros
    # are taken into account; currently deprecated rather than an error.
    bad_zero_bounds = (
        min_value == max_value == 0
        and is_negative(max_value)
        and not is_negative(min_value)
    )
    if (
        min_value is not None
        and max_value is not None
        and (min_value > max_value or bad_zero_bounds)
    ):
        # This is a custom alternative to check_valid_interval, because we want
        # to include the bit-width and exclusion information in the message.
        msg = (
            "There are no %s-bit floating-point values between min_value=%r "
            "and max_value=%r" % (width, min_arg, max_arg)
        )
        if exclude_min or exclude_max:
            msg += ", exclude_min=%r and exclude_max=%r" % (exclude_min, exclude_max)
        if bad_zero_bounds:
            note_deprecation(msg, since="2019-03-19")
        else:
            raise InvalidArgument(msg)

    if allow_infinity is None:
        allow_infinity = bool(min_value is None or max_value is None)
    elif allow_infinity:
        if min_value is not None and max_value is not None:
            raise InvalidArgument(
                "Cannot have allow_infinity=%r, with both min_value and "
                "max_value" % (allow_infinity)
            )
    elif min_value == float("inf"):
        raise InvalidArgument("allow_infinity=False excludes min_value=inf")
    elif max_value == float("-inf"):
        raise InvalidArgument("allow_infinity=False excludes max_value=-inf")

    if min_value is None and max_value is None:
        result = FloatStrategy(
            allow_infinity=allow_infinity, allow_nan=allow_nan
        )  # type: SearchStrategy[float]
    elif min_value is not None and max_value is not None:
        if min_value == max_value:
            assert isinstance(min_value, float)
            result = just(min_value)
        elif is_negative(min_value):
            if is_negative(max_value):
                # Entirely negative range: generate the mirrored positive
                # range and negate.
                result = floats(min_value=-max_value, max_value=-min_value).map(
                    operator.neg
                )
            else:
                # Range straddles zero: split into two halves that each
                # shrink towards zero.
                result = floats(min_value=0.0, max_value=max_value) | floats(
                    min_value=0.0, max_value=-min_value
                ).map(operator.neg)
        elif count_between_floats(min_value, max_value) > 1000:
            result = FixedBoundedFloatStrategy(
                lower_bound=min_value, upper_bound=max_value
            )
        else:
            # Few enough representable values: enumerate them via their
            # integer bit patterns.
            ub_int = float_to_int(max_value, width)
            lb_int = float_to_int(min_value, width)
            assert lb_int <= ub_int
            result = integers(min_value=lb_int, max_value=ub_int).map(
                lambda x: int_to_float(x, width)
            )
    elif min_value is not None:
        assert isinstance(min_value, float)
        if min_value < 0:
            result = floats(min_value=0.0, allow_infinity=allow_infinity) | floats(
                min_value=min_value, max_value=-0.0
            )
        else:
            result = floats(allow_infinity=allow_infinity, allow_nan=False).map(
                lambda x: min_value + abs(x)  # type: ignore
            )
            if not allow_infinity:
                # min_value + abs(x) can overflow to inf even for finite x.
                result = result.filter(lambda x: not math.isinf(x))
            if min_value == 0 and not is_negative(min_value):
                # Bound is +0.0, so -0.0 must be excluded by sign bit.
                result = result.filter(lambda x: math.copysign(1.0, x) == 1)
    else:
        assert isinstance(max_value, float)
        if max_value > 0:
            result = floats(min_value=0.0, max_value=max_value) | floats(
                max_value=-0.0, allow_infinity=allow_infinity
            )
        else:
            result = floats(allow_infinity=allow_infinity, allow_nan=False).map(
                lambda x: max_value - abs(x)  # type: ignore
            )
            if not allow_infinity:
                result = result.filter(lambda x: not math.isinf(x))
            if max_value == 0 and is_negative(max_value):
                # Bound is -0.0, so +0.0 must be excluded by sign bit.
                result = result.filter(is_negative)
    if width < 64:

        def downcast(x):
            try:
                return float_of(x, width)
            except OverflowError:
                # Value representable at 64 bits but not at this width.
                reject()

        return result.map(downcast)
    return result


@cacheable
@defines_strategy
def tuples(*args):
    # type: (*SearchStrategy) -> SearchStrategy[tuple]
    """Return a strategy which generates a tuple of the same length as args by
    generating the value at index i from args[i].

    e.g. tuples(integers(), integers()) would generate a tuple of length
    two with both values an integer.

    Examples from this strategy shrink by shrinking their component parts.
    """
    for arg in args:
        check_strategy(arg)
    return TupleStrategy(args)


@overload
def sampled_from(elements):
    # type: (Sequence[T]) -> SearchStrategy[T]
    pass  # pragma: no cover


@overload
def sampled_from(elements):
    # type: (Type[enum.Enum]) -> SearchStrategy[Any]
    # `SearchStrategy[Enum]` is unreliable due to metaclass issues.
    pass  # pragma: no cover


@defines_strategy
def sampled_from(elements):
    """Returns a strategy which generates any value present in ``elements``.

    Note that as with :func:`~hypothesis.strategies.just`, values will not be
    copied and thus you should be careful of using mutable data.

    ``sampled_from`` supports ordered collections, as well as
    :class:`~python:enum.Enum` objects. :class:`~python:enum.Flag` objects
    may also generate any combination of their members.

    Examples from this strategy shrink by replacing them with values earlier
    in the list. So e.g. sampled_from((10, 1)) will shrink by trying to
    replace 1 values with 10, and sampled_from((1, 10)) will shrink by
    trying to replace 10 values with 1.
    """
    values = check_sample(elements, "sampled_from")
    if not values:
        note_deprecation(
            "sampled_from() with nothing to sample is deprecated and will be an "
            "error in a future version. It currently returns `st.nothing()`, "
            "which if unexpected can make parts of a strategy silently vanish.",
            since="2019-03-12",
        )
        return nothing()
    if len(values) == 1:
        return just(values[0])
    if hasattr(enum, "Flag") and isclass(elements) and issubclass(elements, enum.Flag):
        # Combinations of enum.Flag members are also members.  We generate
        # these dynamically, because static allocation takes O(2^n) memory.
        return sets(sampled_from(values), min_size=1).map(
            lambda s: reduce(operator.or_, s)
        )
    return SampledFromStrategy(values)
@cacheable
@defines_strategy
def lists(
    elements,  # type: SearchStrategy[Ex]
    min_size=0,  # type: int
    max_size=None,  # type: int
    unique_by=None,  # type: Union[Callable, Tuple[Callable, ...]]
    unique=False,  # type: bool
):
    # type: (...) -> SearchStrategy[List[Ex]]
    """Returns a list containing values drawn from elements with length in the
    interval [min_size, max_size] (no bounds in that direction if these are
    None). If max_size is 0, only the empty list will be drawn.

    If ``unique`` is True (or something that evaluates to True), we compare
    direct object equality, as if unique_by was ``lambda x: x``. This
    comparison only works for hashable types.

    If ``unique_by`` is not None it must be a callable or tuple of callables
    returning a hashable type when given a value drawn from elements. The
    resulting list will satisfy the condition that for ``i`` != ``j``,
    ``unique_by(result[i])`` != ``unique_by(result[j])``.

    If ``unique_by`` is a tuple of callables the uniqueness will be respective
    to each callable.

    For example, the following will produce two columns of integers with both
    columns being unique respectively.

    .. code-block:: pycon

        >>> twoints = st.tuples(st.integers(), st.integers())
        >>> st.lists(twoints, unique_by=(lambda x: x[0], lambda x: x[1]))

    Examples from this strategy shrink by trying to remove elements from the
    list, and by shrinking each individual element of the list.
    """
    check_valid_sizes(min_size, max_size)
    check_strategy(elements, "elements")
    if unique:
        if unique_by is not None:
            raise InvalidArgument(
                "cannot specify both unique and unique_by "
                "(you probably only want to set unique_by)"
            )
        else:
            # unique=True is sugar for uniqueness on the identity function.
            def unique_by(x):
                return x

    if max_size == 0:
        return builds(list)
    if unique_by is not None:
        if not (callable(unique_by) or isinstance(unique_by, tuple)):
            raise InvalidArgument(
                "unique_by=%r is not a callable or tuple of callables" % (unique_by)
            )
        if callable(unique_by):
            unique_by = (unique_by,)
        if len(unique_by) == 0:
            raise InvalidArgument("unique_by is empty")
        for i, f in enumerate(unique_by):
            if not callable(f):
                raise InvalidArgument("unique_by[%i]=%r is not a callable" % (i, f))
        # Note that lazy strategies automatically unwrap when passed to a
        # defines_strategy function.
        if isinstance(elements, SampledFromStrategy):
            # With a finite element pool we can check feasibility up front
            # and clamp max_size to the number of distinct elements.
            element_count = len(elements.elements)
            if min_size > element_count:
                raise InvalidArgument(
                    "Cannot create a collection of min_size=%r unique elements with "
                    "values drawn from only %d distinct elements"
                    % (min_size, element_count)
                )

            if max_size is not None:
                max_size = min(max_size, element_count)
            else:
                max_size = element_count

            return UniqueSampledListStrategy(
                elements=elements, max_size=max_size, min_size=min_size, keys=unique_by
            )

        return UniqueListStrategy(
            elements=elements, max_size=max_size, min_size=min_size, keys=unique_by
        )
    return ListStrategy(elements, min_size=min_size, max_size=max_size)


@cacheable
@defines_strategy
def sets(
    elements,  # type: SearchStrategy[Ex]
    min_size=0,  # type: int
    max_size=None,  # type: int
):
    # type: (...) -> SearchStrategy[Set[Ex]]
    """This has the same behaviour as lists, but returns sets instead.

    Note that Hypothesis cannot tell if values are drawn from elements
    are hashable until running the test, so you can define a strategy
    for sets of an unhashable type but it will fail at test time.

    Examples from this strategy shrink by trying to remove elements from the
    set, and by shrinking each individual element of the set.
    """
    return lists(
        elements=elements, min_size=min_size, max_size=max_size, unique=True
    ).map(set)
@cacheable
@defines_strategy
def frozensets(
    elements,  # type: SearchStrategy[Ex]
    min_size=0,  # type: int
    max_size=None,  # type: int
):
    # type: (...) -> SearchStrategy[FrozenSet[Ex]]
    """This is identical to the sets function but instead returns
    frozensets."""
    return lists(
        elements=elements, min_size=min_size, max_size=max_size, unique=True
    ).map(frozenset)


@implements_iterator
class PrettyIter(object):
    """Iterator wrapper that keeps a readable repr of its source values,
    so failing examples display as ``iter([...])`` instead of an opaque
    iterator object."""

    def __init__(self, values):
        self._values = values
        self._iter = iter(self._values)

    def __iter__(self):
        return self._iter

    def __next__(self):
        return next(self._iter)

    def __repr__(self):
        return "iter({!r})".format(self._values)


@defines_strategy
def iterables(elements, min_size=0, max_size=None, unique_by=None, unique=False):
    """This has the same behaviour as lists, but returns iterables instead.

    Some iterables cannot be indexed (e.g. sets) and some do not have a
    fixed length (e.g. generators). This strategy produces iterators,
    which cannot be indexed and do not have a fixed length. This ensures
    that you do not accidentally depend on sequence behaviour.
    """
    return lists(
        elements=elements,
        min_size=min_size,
        max_size=max_size,
        unique_by=unique_by,
        unique=unique,
    ).map(PrettyIter)


@defines_strategy
def fixed_dictionaries(mapping):
    # type: (Dict[T, SearchStrategy[Ex]]) -> SearchStrategy[Dict[T, Ex]]
    """Generates a dictionary of the same type as mapping with a fixed set of
    keys mapping to strategies. mapping must be a dict subclass.

    Generated values have all keys present in mapping, with the
    corresponding values drawn from mapping[key]. If mapping is an
    instance of OrderedDict the keys will also be in the same order,
    otherwise the order is arbitrary.

    Examples from this strategy shrink by shrinking each individual value in
    the generated dictionary.
    """
    check_type(dict, mapping, "mapping")
    for v in mapping.values():
        check_strategy(v)
    return FixedKeysDictStrategy(mapping)
@cacheable
@defines_strategy
def dictionaries(
    keys,  # type: SearchStrategy[Ex]
    values,  # type: SearchStrategy[T]
    dict_class=dict,  # type: type
    min_size=0,  # type: int
    max_size=None,  # type: int
):
    # type: (...) -> SearchStrategy[Dict[Ex, T]]
    # Describing the exact dict_class to Mypy drops the key and value types,
    # so we report Dict[K, V] instead of Mapping[Any, Any] for now.  Sorry!
    """Generates dictionaries of type ``dict_class`` with keys drawn from the
    ``keys`` argument and values drawn from the ``values`` argument.

    The size parameters have the same interpretation as for
    :func:`~hypothesis.strategies.lists`.

    Examples from this strategy shrink by trying to remove keys from the
    generated dictionary, and by shrinking each generated key and value.
    """
    check_valid_sizes(min_size, max_size)
    if max_size == 0:
        return fixed_dictionaries(dict_class())
    check_strategy(keys)
    check_strategy(values)

    # Build as a list of key/value pairs unique by key, then convert; this
    # inherits lists' shrinking behaviour for free.
    return lists(
        tuples(keys, values),
        min_size=min_size,
        max_size=max_size,
        unique_by=lambda x: x[0],
    ).map(dict_class)


@cacheable
@defines_strategy_with_reusable_values
def characters(
    whitelist_categories=None,  # type: Sequence[Text]
    blacklist_categories=None,  # type: Sequence[Text]
    blacklist_characters=None,  # type: Sequence[Text]
    min_codepoint=None,  # type: int
    max_codepoint=None,  # type: int
    whitelist_characters=None,  # type: Sequence[Text]
):
    # type: (...) -> SearchStrategy[Text]
    """Generates unicode text type (unicode on python 2, str on python 3)
    characters following specified filtering rules.

    - When no filtering rules are specified, any character can be produced.
    - If ``min_codepoint`` or ``max_codepoint`` is specified, then only
      characters having a codepoint in that range will be produced.
    - If ``whitelist_categories`` is specified, then only characters from
      those Unicode categories will be produced. This is a further
      restriction, characters must also satisfy ``min_codepoint`` and
      ``max_codepoint``.
    - If ``blacklist_categories`` is specified, then any character from those
      categories will not be produced.  Any overlap between
      ``whitelist_categories`` and ``blacklist_categories`` will raise an
      exception, as each character can only belong to a single class.
    - If ``whitelist_characters`` is specified, then any additional characters
      in that list will also be produced.
    - If ``blacklist_characters`` is specified, then any characters in
      that list will be not be produced. Any overlap between
      ``whitelist_characters`` and ``blacklist_characters`` will raise an
      exception.

    The ``_codepoint`` arguments must be integers between zero and
    :obj:`python:sys.maxunicode`.  The ``_characters`` arguments must be
    collections of length-one unicode strings, such as a unicode string.

    The ``_categories`` arguments must be used to specify either the
    one-letter Unicode major category or the two-letter Unicode
    `general category`_.  For example, ``('Nd', 'Lu')`` signifies "Number,
    decimal digit" and "Letter, uppercase".  A single letter ('major category')
    can be given to match all corresponding categories, for example ``'P'``
    for characters in any punctuation category.

    .. _general category: https://wikipedia.org/wiki/Unicode_character_property

    Examples from this strategy shrink towards the codepoint for ``'0'``,
    or the first allowable codepoint after it if ``'0'`` is excluded.
    """
    check_valid_size(min_codepoint, "min_codepoint")
    check_valid_size(max_codepoint, "max_codepoint")
    check_valid_interval(min_codepoint, max_codepoint, "min_codepoint", "max_codepoint")
    if (
        min_codepoint is None
        and max_codepoint is None
        and whitelist_categories is None
        and blacklist_categories is None
        and whitelist_characters is not None
    ):
        # whitelist_characters alone cannot restrict anything, which almost
        # certainly indicates a misunderstanding of the API.
        raise InvalidArgument(
            "Nothing is excluded by other arguments, so passing only "
            "whitelist_characters=%(chars)r would have no effect. Also pass "
            "whitelist_categories=(), or use sampled_from(%(chars)r) instead."
            % dict(chars=whitelist_characters)
        )
    blacklist_characters = blacklist_characters or ""
    whitelist_characters = whitelist_characters or ""
    overlap = set(blacklist_characters).intersection(whitelist_characters)
    if overlap:
        raise InvalidArgument(
            "Characters %r are present in both whitelist_characters=%r, and "
            "blacklist_characters=%r"
            % (sorted(overlap), whitelist_characters, blacklist_characters)
        )
    blacklist_categories = as_general_categories(
        blacklist_categories, "blacklist_categories"
    )
    if (
        whitelist_categories is not None
        and not whitelist_categories
        and not whitelist_characters
    ):
        raise InvalidArgument(
            "When whitelist_categories is an empty collection and there are "
            "no characters specified in whitelist_characters, nothing can "
            "be generated by the characters() strategy."
        )
    whitelist_categories = as_general_categories(
        whitelist_categories, "whitelist_categories"
    )
    both_cats = set(blacklist_categories or ()).intersection(whitelist_categories or ())
    if both_cats:
        raise InvalidArgument(
            "Categories %r are present in both whitelist_categories=%r, and "
            "blacklist_categories=%r"
            % (sorted(both_cats), whitelist_categories, blacklist_categories)
        )

    return OneCharStringStrategy(
        whitelist_categories=whitelist_categories,
        blacklist_categories=blacklist_categories,
        blacklist_characters=blacklist_characters,
        min_codepoint=min_codepoint,
        max_codepoint=max_codepoint,
        whitelist_characters=whitelist_characters,
    )
@cacheable
@defines_strategy_with_reusable_values
def text(
    alphabet=characters(
        blacklist_categories=("Cs",)
    ),  # type: Union[Sequence[Text], SearchStrategy[Text]]
    min_size=0,  # type: int
    max_size=None,  # type: int
):
    # type: (...) -> SearchStrategy[Text]
    """Generates values of a unicode text type (unicode on python 2, str on
    python 3) with values drawn from ``alphabet``, which should be an iterable
    of length one strings or a strategy generating such strings.

    The default alphabet strategy can generate the full unicode range but
    excludes surrogate characters because they are invalid in the UTF-8
    encoding.  You can use :func:`~hypothesis.strategies.characters` without
    arguments to find surrogate-related bugs such as :bpo:`34454`.

    ``min_size`` and ``max_size`` have the usual interpretations.
    Note that Python measures string length by counting codepoints: U+00C5
    ``Å`` is a single character, while U+0041 U+030A ``Å`` is two - the ``A``,
    and a combining ring above.

    Examples from this strategy shrink towards shorter strings, and with the
    characters in the text shrinking as per the alphabet strategy.
    This strategy does not :func:`~python:unicodedata.normalize` examples,
    so generated strings may be in any or none of the 'normal forms'.
    """
    check_valid_sizes(min_size, max_size)
    if alphabet is None:
        note_deprecation(
            "alphabet=None is deprecated; just omit the argument", since="2018-10-05"
        )
        char_strategy = characters(blacklist_categories=("Cs",))
    elif isinstance(alphabet, SearchStrategy):
        char_strategy = alphabet
    else:
        # A plain iterable alphabet: validate that every element is a
        # length-one unicode string before sampling from it.
        non_string = [c for c in alphabet if not isinstance(c, string_types)]
        if non_string:
            raise InvalidArgument(
                "The following elements in alphabet are not unicode "
                "strings: %r" % (non_string,)
            )
        not_one_char = [c for c in alphabet if len(c) != 1]
        if not_one_char:
            raise InvalidArgument(
                "The following elements in alphabet are not of length "
                "one, which leads to violation of size constraints: %r"
                % (not_one_char,)
            )
        char_strategy = (
            characters(whitelist_categories=(), whitelist_characters=alphabet)
            if alphabet
            else nothing()
        )
    if (max_size == 0 or char_strategy.is_empty) and not min_size:
        return just(u"")
    return StringStrategy(lists(char_strategy, min_size=min_size, max_size=max_size))
""" check_valid_sizes(min_size, max_size) if alphabet is None: note_deprecation( "alphabet=None is deprecated; just omit the argument", since="2018-10-05" ) char_strategy = characters(blacklist_categories=("Cs",)) elif isinstance(alphabet, SearchStrategy): char_strategy = alphabet else: non_string = [c for c in alphabet if not isinstance(c, string_types)] if non_string: raise InvalidArgument( "The following elements in alphabet are not unicode " "strings: %r" % (non_string,) ) not_one_char = [c for c in alphabet if len(c) != 1] if not_one_char: raise InvalidArgument( "The following elements in alphabet are not of length " "one, which leads to violation of size constraints: %r" % (not_one_char,) ) char_strategy = ( characters(whitelist_categories=(), whitelist_characters=alphabet) if alphabet else nothing() ) if (max_size == 0 or char_strategy.is_empty) and not min_size: return just(u"") return StringStrategy(lists(char_strategy, min_size=min_size, max_size=max_size)) @cacheable @defines_strategy def from_regex(regex, fullmatch=False): # type: (Union[AnyStr, Pattern[AnyStr]], bool) -> SearchStrategy[AnyStr] r"""Generates strings that contain a match for the given regex (i.e. ones for which :func:`python:re.search` will return a non-None result). ``regex`` may be a pattern or :func:`compiled regex `. Both byte-strings and unicode strings are supported, and will generate examples of the same type. You can use regex flags such as :obj:`python:re.IGNORECASE` or :obj:`python:re.DOTALL` to control generation. Flags can be passed either in compiled regex or inside the pattern with a ``(?iLmsux)`` group. Some regular expressions are only partly supported - the underlying strategy checks local matching and relies on filtering to resolve context-dependent expressions. Using too many of these constructs may cause health-check errors as too many examples are filtered out. This mainly includes (positive or negative) lookahead and lookbehind groups. 
@cacheable
@defines_strategy_with_reusable_values
def binary(min_size=0, max_size=None):
    # type: (int, int) -> SearchStrategy[bytes]
    """Generates the appropriate binary type (str in python 2, bytes in python
    3).

    min_size and max_size have the usual interpretations.

    Examples from this strategy shrink towards smaller strings and lower byte
    values.
    """
    check_valid_sizes(min_size, max_size)
    # Chained comparison: true iff min_size == max_size AND that shared size
    # is not None, i.e. an exact fixed length was requested.
    if min_size == max_size is not None:
        return FixedSizeBytes(min_size)
    return BinaryStringStrategy(
        lists(
            integers(min_value=0, max_value=255), min_size=min_size, max_size=max_size
        )
    )


@cacheable
@defines_strategy
def randoms():
    # type: () -> SearchStrategy[random.Random]
    """Generates instances of ``random.Random``, tweaked to show the seed
    value in the repr for reproducibility.

    Examples from this strategy shrink to seeds closer to zero.
    """
    return integers().map(RandomWithSeed)
""" return integers().map(RandomWithSeed) class RandomSeeder(object): def __init__(self, seed): self.seed = seed def __repr__(self): return "RandomSeeder(%r)" % (self.seed,) class RandomModule(SearchStrategy): def do_draw(self, data): data.can_reproduce_example_from_repr = False seed = data.draw(integers(0, 2 ** 32 - 1)) seed_all, restore_all = get_seeder_and_restorer(seed) seed_all() cleanup(restore_all) return RandomSeeder(seed) @cacheable @defines_strategy def random_module(): # type: () -> SearchStrategy[RandomSeeder] """The Hypothesis engine handles PRNG state for the stdlib and Numpy random modules internally, always seeding them to zero and restoring the previous state after the test. If having a fixed seed would unacceptably weaken your tests, and you cannot use a ``random.Random`` instance provided by :func:`~hypothesis.strategies.randoms`, this strategy calls :func:`python:random.seed` with an arbitrary integer and passes you an opaque object whose repr displays the seed value for debugging. If ``numpy.random`` is available, that state is also managed. Examples from these strategy shrink to seeds closer to zero. """ return shared(RandomModule(), "hypothesis.strategies.random_module()") @cacheable @defines_strategy def builds( *callable_and_args, # type: Any **kwargs # type: Union[SearchStrategy[Any], InferType] ): # type: (...) -> SearchStrategy[Any] """Generates values by drawing from ``args`` and ``kwargs`` and passing them to the callable (provided as the first positional argument) in the appropriate argument position. e.g. ``builds(target, integers(), flag=booleans())`` would draw an integer ``i`` and a boolean ``b`` and call ``target(i, flag=b)``. If the callable has type annotations, they will be used to infer a strategy for required arguments that were not passed to builds. 
You can also tell builds to infer a strategy for an optional argument by passing the special value :const:`hypothesis.infer` as a keyword argument to builds, instead of a strategy for that argument to the callable. If the callable is a class defined with :pypi:`attrs`, missing required arguments will be inferred from the attribute on a best-effort basis, e.g. by checking :ref:`attrs standard validators `. Dataclasses are handled natively by the inference from type hints. Examples from this strategy shrink by shrinking the argument values to the callable. """ if not callable_and_args: raise InvalidArgument( "builds() must be passed a callable as the first positional " "argument, but no positional arguments were given." ) target, args = callable_and_args[0], callable_and_args[1:] if not callable(target): raise InvalidArgument( "The first positional argument to builds() must be a callable " "target to construct." ) if infer in args: # Avoid an implementation nightmare juggling tuples and worse things raise InvalidArgument( "infer was passed as a positional argument to " "builds(), but is only allowed as a keyword arg" ) required = required_args(target, args, kwargs) or set() to_infer = {k for k, v in kwargs.items() if v is infer} if required or to_infer: if isclass(target) and attr.has(target): # Use our custom introspection for attrs classes from hypothesis.searchstrategy.attrs import from_attrs return from_attrs(target, args, kwargs, required | to_infer) # Otherwise, try using type hints if isclass(target): if is_typed_named_tuple(target): # Special handling for typing.NamedTuple hints = target._field_types else: hints = get_type_hints(target.__init__) else: hints = get_type_hints(target) if to_infer - set(hints): raise InvalidArgument( "passed infer for %s, but there is no type annotation" % (", ".join(sorted(to_infer - set(hints)))) ) for kw in set(hints) & (required | to_infer): kwargs[kw] = from_type(hints[kw]) # Mypy doesn't realise that `infer` is gone from 
kwargs now kwarg_strat = fixed_dictionaries(kwargs) # type: ignore return tuples(tuples(*args), kwarg_strat).map( lambda value: target(*value[0], **value[1]) ) def _defer_from_type(func): # type: (T) -> T """Decorator to make from_type lazy to support recursive definitions.""" @proxies(func) def inner(*args, **kwargs): return deferred(lambda: func(*args, **kwargs)) return inner @cacheable @_defer_from_type def from_type(thing): # type: (Type[Ex]) -> SearchStrategy[Ex] """Looks up the appropriate search strategy for the given type. ``from_type`` is used internally to fill in missing arguments to :func:`~hypothesis.strategies.builds` and can be used interactively to explore what strategies are available or to debug type resolution. You can use :func:`~hypothesis.strategies.register_type_strategy` to handle your custom types, or to globally redefine certain strategies - for example excluding NaN from floats, or use timezone-aware instead of naive time and datetime strategies. The resolution logic may be changed in a future version, but currently tries these five options: 1. If ``thing`` is in the default lookup mapping or user-registered lookup, return the corresponding strategy. The default lookup covers all types with Hypothesis strategies, including extras where possible. 2. If ``thing`` is from the :mod:`python:typing` module, return the corresponding strategy (special logic). 3. If ``thing`` has one or more subtypes in the merged lookup, return the union of the strategies for those types that are not subtypes of other elements in the lookup. 4. Finally, if ``thing`` has type annotations for all required arguments, and is not an abstract class, it is resolved via :func:`~hypothesis.strategies.builds`. 5. Because :mod:`abstract types ` cannot be instantiated, we treat abstract types as the union of their concrete subclasses. 
Note that this lookup works via inheritance but not via :obj:`~python:abc.ABCMeta.register`, so you may still need to use :func:`~hypothesis.strategies.register_type_strategy`. There is a valuable recipe for leveraging ``from_type()`` to generate "everything except" values from a specified type. I.e. .. code-block:: python def everything_except(excluded_types): return ( from_type(type).flatmap(from_type) .filter(lambda x: not isinstance(x, excluded_types)) ) For example, ``everything_except(int)`` returns a strategy that can generate anything that ``from_type()`` can ever generate, except for instances of :class:`python:int`, and excluding instances of types added via :func:`~hypothesis.strategies.register_type_strategy`. This is useful when writing tests which check that invalid input is rejected in a certain way. """ # TODO: We would like to move this to the top level, but pending some major # refactoring it's hard to do without creating circular imports. from hypothesis.searchstrategy import types def as_strategy(strat_or_callable, thing): # User-provided strategies need some validation, and callables even more # of it. We do this in three places, hence the helper function if not isinstance(strat_or_callable, SearchStrategy): assert callable(strat_or_callable) # Validated in register_type_strategy strategy = strat_or_callable(thing) else: strategy = strat_or_callable if not isinstance(strategy, SearchStrategy): raise ResolutionFailed( "Error: %s was registered for %r, but returned non-strategy %r" % (thing, nicerepr(strat_or_callable), strategy) ) if strategy.is_empty: raise ResolutionFailed("Error: %r resolved to an empty strategy" % (thing,)) return strategy if typing is not None: # pragma: no branch if not isinstance(thing, type): if types.is_a_new_type(thing): # Check if we have an explicitly registered strategy for this thing, # resolve it so, and otherwise resolve as for the base type. 
if thing in types._global_type_lookup: return as_strategy(types._global_type_lookup[thing], thing) return from_type(thing.__supertype__) # Under Python 3.6, Unions are not instances of `type` - but we # still want to resolve them! if getattr(thing, "__origin__", None) is typing.Union: args = sorted(thing.__args__, key=types.type_sorting_key) return one_of([from_type(t) for t in args]) # We can't resolve forward references, and under Python 3.5 (only) # a forward reference is an instance of type. Hence, explicit check: elif type(thing) == getattr(typing, "_ForwardRef", None): # pragma: no cover raise ResolutionFailed( "thing=%s cannot be resolved. Upgrading to python>=3.6 may " "fix this problem via improvements to the typing module." % (thing,) ) if not types.is_a_type(thing): raise InvalidArgument("thing=%s must be a type" % (thing,)) # Now that we know `thing` is a type, the first step is to check for an # explicitly registered strategy. This is the best (and hopefully most # common) way to resolve a type to a strategy. Note that the value in the # lookup may be a strategy or a function from type -> strategy; and we # convert empty results into an explicit error. if thing in types._global_type_lookup: return as_strategy(types._global_type_lookup[thing], thing) # If there's no explicitly registered strategy, maybe a subtype of thing # is registered - if so, we can resolve it to the subclass strategy. # We'll start by checking if thing is from from the typing module, # because there are several special cases that don't play well with # subclass and instance checks. if typing is not None: # pragma: no branch if isinstance(thing, typing_root_type): return types.from_typing_type(thing) # If it's not from the typing module, we get all registered types that are # a subclass of `thing` and are not themselves a subtype of any other such # type. 
For example, `Number -> integers() | floats()`, but bools() is # not included because bool is a subclass of int as well as Number. strategies = [ as_strategy(v, thing) for k, v in types._global_type_lookup.items() if isinstance(k, type) and issubclass(k, thing) and sum(types.try_issubclass(k, typ) for typ in types._global_type_lookup) == 1 ] if strategies: return one_of(strategies) # If we don't have a strategy registered for this type or any subtype, we # may be able to fall back on type annotations. if issubclass(thing, enum.Enum): return sampled_from(thing) # If we know that builds(thing) will fail, give a better error message required = required_args(thing) if required and not any( [ required.issubset(get_type_hints(thing.__init__)), attr.has(thing), # NamedTuples are weird enough that we need a specific check for them. is_typed_named_tuple(thing), ] ): raise ResolutionFailed( "Could not resolve %r to a strategy; consider " "using register_type_strategy" % (thing,) ) # Finally, try to build an instance by calling the type object if not isabstract(thing): return builds(thing) subclasses = thing.__subclasses__() if not subclasses: raise ResolutionFailed( "Could not resolve %r to a strategy, because it is an abstract type " "without any subclasses. Consider using register_type_strategy" % (thing,) ) return sampled_from(subclasses).flatmap(from_type) @cacheable @defines_strategy_with_reusable_values def fractions( min_value=None, # type: Union[Real, AnyStr] max_value=None, # type: Union[Real, AnyStr] max_denominator=None, # type: int ): # type: (...) -> SearchStrategy[Fraction] """Returns a strategy which generates Fractions. If ``min_value`` is not None then all generated values are no less than ``min_value``. If ``max_value`` is not None then all generated values are no greater than ``max_value``. ``min_value`` and ``max_value`` may be anything accepted by the :class:`~fractions.Fraction` constructor. 
If ``max_denominator`` is not None then the denominator of any generated values is no greater than ``max_denominator``. Note that ``max_denominator`` must be None or a positive integer. Examples from this strategy shrink towards smaller denominators, then closer to zero. """ min_value = try_convert(Fraction, min_value, "min_value") max_value = try_convert(Fraction, max_value, "max_value") if ( min_value is not None and not isinstance(min_value, Fraction) or max_value is not None and not isinstance(max_value, Fraction) ): assert False, "Unreachable for Mypy" # pragma: no cover check_valid_interval(min_value, max_value, "min_value", "max_value") check_valid_integer(max_denominator) if max_denominator is not None: if max_denominator < 1: raise InvalidArgument("max_denominator=%r must be >= 1" % max_denominator) def fraction_bounds(value): # type: (Fraction) -> Tuple[Fraction, Fraction] """Find the best lower and upper approximation for value.""" # Adapted from CPython's Fraction.limit_denominator here: # https://github.com/python/cpython/blob/3.6/Lib/fractions.py#L219 assert max_denominator is not None if value is None or value.denominator <= max_denominator: return value, value p0, q0, p1, q1 = 0, 1, 1, 0 n, d = value.numerator, value.denominator while True: a = n // d q2 = q0 + a * q1 if q2 > max_denominator: break p0, q0, p1, q1 = p1, q1, p0 + a * p1, q2 n, d = d, n - a * d k = (max_denominator - q0) // q1 low, high = Fraction(p1, q1), Fraction(p0 + k * p1, q0 + k * q1) assert low < value < high return low, high # Take the high approximation for min_value and low for max_value bounds = (max_denominator, min_value, max_value) if min_value is not None: if min_value.denominator > max_denominator: note_deprecation( "The min_value=%r has a denominator greater than the " "max_denominator=%r, which will be an error in a future " "version." 
% (min_value, max_denominator), since="2018-10-12", ) _, min_value = fraction_bounds(min_value) if max_value is not None: if max_value.denominator > max_denominator: note_deprecation( "The max_value=%r has a denominator greater than the " "max_denominator=%r, which will be an error in a future " "version." % (max_value, max_denominator), since="2018-10-12", ) max_value, _ = fraction_bounds(max_value) if min_value is not None and max_value is not None and min_value > max_value: raise InvalidArgument( "There are no fractions with a denominator <= %r between " "min_value=%r and max_value=%r" % bounds ) if min_value is not None and min_value == max_value: return just(min_value) def dm_func(denom): """Take denom, construct numerator strategy, and build fraction.""" # Four cases of algebra to get integer bounds and scale factor. min_num, max_num = None, None if max_value is None and min_value is None: pass elif min_value is None: max_num = denom * max_value.numerator denom *= max_value.denominator elif max_value is None: min_num = denom * min_value.numerator denom *= min_value.denominator else: low = min_value.numerator * max_value.denominator high = max_value.numerator * min_value.denominator scale = min_value.denominator * max_value.denominator # After calculating our integer bounds and scale factor, we remove # the gcd to avoid drawing more bytes for the example than needed. # Note that `div` can be at most equal to `scale`. div = gcd(scale, gcd(low, high)) min_num = denom * low // div max_num = denom * high // div denom *= scale // div return builds( Fraction, integers(min_value=min_num, max_value=max_num), just(denom) ) if max_denominator is None: return integers(min_value=1).flatmap(dm_func) return ( integers(1, max_denominator) .flatmap(dm_func) .map(lambda f: f.limit_denominator(max_denominator)) ) def _as_finite_decimal( value, # type: Union[Real, AnyStr, None] name, # type: str allow_infinity, # type: Optional[bool] ): # type: (...) 
-> Optional[Decimal] """Convert decimal bounds to decimals, carefully.""" assert name in ("min_value", "max_value") if value is None: return None if not isinstance(value, Decimal): with localcontext(Context()): # ensure that default traps are enabled value = try_convert(Decimal, value, name) assert isinstance(value, Decimal) if value.is_finite(): return value if value.is_infinite() and (value < 0 if "min" in name else value > 0): if allow_infinity or allow_infinity is None: return None raise InvalidArgument( "allow_infinity=%r, but %s=%r" % (allow_infinity, name, value) ) # This could be infinity, quiet NaN, or signalling NaN raise InvalidArgument(u"Invalid %s=%r" % (name, value)) @cacheable @defines_strategy_with_reusable_values def decimals( min_value=None, # type: Union[Real, AnyStr] max_value=None, # type: Union[Real, AnyStr] allow_nan=None, # type: bool allow_infinity=None, # type: bool places=None, # type: int ): # type: (...) -> SearchStrategy[Decimal] """Generates instances of :class:`python:decimal.Decimal`, which may be: - A finite rational number, between ``min_value`` and ``max_value``. - Not a Number, if ``allow_nan`` is True. None means "allow NaN, unless ``min_value`` and ``max_value`` are not None". - Positive or negative infinity, if ``max_value`` and ``min_value`` respectively are None, and ``allow_infinity`` is not False. None means "allow infinity, unless excluded by the min and max values". Note that where floats have one ``NaN`` value, Decimals have four: signed, and either *quiet* or *signalling*. See `the decimal module docs `_ for more information on special values. If ``places`` is not None, all finite values drawn from the strategy will have that number of digits after the decimal place. Examples from this strategy do not have a well defined shrink order but try to maximize human readability when shrinking. 
""" # Convert min_value and max_value to Decimal values, and validate args check_valid_integer(places) if places is not None and places < 0: raise InvalidArgument("places=%r may not be negative" % places) min_value = _as_finite_decimal(min_value, "min_value", allow_infinity) max_value = _as_finite_decimal(max_value, "max_value", allow_infinity) check_valid_interval(min_value, max_value, "min_value", "max_value") if allow_infinity and (None not in (min_value, max_value)): raise InvalidArgument("Cannot allow infinity between finite bounds") # Set up a strategy for finite decimals. Note that both floating and # fixed-point decimals require careful handling to remain isolated from # any external precision context - in short, we always work out the # required precision for lossless operation and use context methods. if places is not None: # Fixed-point decimals are basically integers with a scale factor def ctx(val): """Return a context in which this value is lossless.""" precision = ceil(math.log10(abs(val) or 1)) + places + 1 return Context(prec=max([precision, 1])) def int_to_decimal(val): context = ctx(val) return context.quantize(context.multiply(val, factor), factor) factor = Decimal(10) ** -places min_num, max_num = None, None if min_value is not None: min_num = ceil(ctx(min_value).divide(min_value, factor)) if max_value is not None: max_num = floor(ctx(max_value).divide(max_value, factor)) if min_num is not None and max_num is not None and min_num > max_num: raise InvalidArgument( "There are no decimals with %d places between min_value=%r " "and max_value=%r " % (places, min_value, max_value) ) strat = integers(min_num, max_num).map(int_to_decimal) else: # Otherwise, they're like fractions featuring a power of ten def fraction_to_decimal(val): precision = ( ceil(math.log10(abs(val.numerator) or 1) + math.log10(val.denominator)) + 1 ) return Context(prec=precision or 1).divide( Decimal(val.numerator), val.denominator ) strat = fractions(min_value, 
max_value).map(fraction_to_decimal) # Compose with sampled_from for infinities and NaNs as appropriate special = [] # type: List[Decimal] if allow_nan or (allow_nan is None and (None in (min_value, max_value))): special.extend(map(Decimal, ("NaN", "-NaN", "sNaN", "-sNaN"))) if allow_infinity or (allow_infinity is max_value is None): special.append(Decimal("Infinity")) if allow_infinity or (allow_infinity is min_value is None): special.append(Decimal("-Infinity")) return strat | (sampled_from(special) if special else nothing()) def recursive( base, # type: SearchStrategy[Ex] extend, # type: Callable[[SearchStrategy[Any]], SearchStrategy[T]] max_leaves=100, # type: int ): # type: (...) -> SearchStrategy[Union[T, Ex]] """base: A strategy to start from. extend: A function which takes a strategy and returns a new strategy. max_leaves: The maximum number of elements to be drawn from base on a given run. This returns a strategy ``S`` such that ``S = extend(base | S)``. That is, values may be drawn from base, or from any strategy reachable by mixing applications of | and extend. An example may clarify: ``recursive(booleans(), lists)`` would return a strategy that may return arbitrarily nested and mixed lists of booleans. So e.g. ``False``, ``[True]``, ``[False, []]``, and ``[[[[True]]]]`` are all valid values to be drawn from that strategy. Examples from this strategy shrink by trying to reduce the amount of recursion and by shrinking according to the shrinking behaviour of base and the result of extend. """ return RecursiveStrategy(base, extend, max_leaves) class PermutationStrategy(SearchStrategy): def __init__(self, values): self.values = values def do_draw(self, data): # Reversed Fisher-Yates shuffle: swap each element with itself or with # a later element. This shrinks i==j for each element, i.e. to no # change. We don't consider the last element as it's always a no-op. 
result = list(self.values) for i in hrange(len(result) - 1): j = integer_range(data, i, len(result) - 1) result[i], result[j] = result[j], result[i] return result @defines_strategy def permutations(values): # type: (Sequence[T]) -> SearchStrategy[List[T]] """Return a strategy which returns permutations of the ordered collection ``values``. Examples from this strategy shrink by trying to become closer to the original order of values. """ values = check_sample(values, "permutations") if not values: return builds(list) return PermutationStrategy(values) @defines_strategy_with_reusable_values def datetimes( min_value=dt.datetime.min, # type: dt.datetime max_value=dt.datetime.max, # type: dt.datetime timezones=none(), # type: SearchStrategy[Optional[dt.tzinfo]] ): # type: (...) -> SearchStrategy[dt.datetime] """A strategy for generating datetimes, which may be timezone-aware. This strategy works by drawing a naive datetime between ``min_value`` and ``max_value``, which must both be naive (have no timezone). ``timezones`` must be a strategy that generates :class:`~python:datetime.tzinfo` objects (or None, which is valid for naive datetimes). A value drawn from this strategy will be added to a naive datetime, and the resulting tz-aware datetime returned. .. note:: tz-aware datetimes from this strategy may be ambiguous or non-existent due to daylight savings, leap seconds, timezone and calendar adjustments, etc. This is intentional, as malformed timestamps are a common source of bugs. :py:func:`hypothesis.extra.pytz.timezones` requires the :pypi:`pytz` package, but provides all timezones in the Olsen database. If you want to allow naive datetimes, combine strategies like ``none() | timezones()``. :py:func:`hypothesis.extra.dateutil.timezones` requires the :pypi:`python-dateutil` package, and similarly provides all timezones there. Alternatively, you can create a list of the timezones you wish to allow (e.g. 
from the standard library, ``datetutil``, or ``pytz``) and use :py:func:`sampled_from`. Ensure that simple values such as None or UTC are at the beginning of the list for proper minimisation. Examples from this strategy shrink towards midnight on January 1st 2000. """ # Why must bounds be naive? In principle, we could also write a strategy # that took aware bounds, but the API and validation is much harder. # If you want to generate datetimes between two particular momements in # time I suggest (a) just filtering out-of-bounds values; (b) if bounds # are very close, draw a value and subtract its UTC offset, handling # overflows and nonexistent times; or (c) do something customised to # handle datetimes in e.g. a four-microsecond span which is not # representable in UTC. Handling (d), all of the above, leads to a much # more complex API for all users and a useful feature for very few. check_type(dt.datetime, min_value, "min_value") check_type(dt.datetime, max_value, "max_value") if min_value.tzinfo is not None: raise InvalidArgument("min_value=%r must not have tzinfo" % (min_value,)) if max_value.tzinfo is not None: raise InvalidArgument("max_value=%r must not have tzinfo" % (max_value,)) check_valid_interval(min_value, max_value, "min_value", "max_value") if not isinstance(timezones, SearchStrategy): raise InvalidArgument( "timezones=%r must be a SearchStrategy that can provide tzinfo " "for datetimes (either None or dt.tzinfo objects)" % (timezones,) ) return DatetimeStrategy(min_value, max_value, timezones) @defines_strategy_with_reusable_values def dates(min_value=dt.date.min, max_value=dt.date.max): # type: (dt.date, dt.date) -> SearchStrategy[dt.date] """A strategy for dates between ``min_value`` and ``max_value``. Examples from this strategy shrink towards January 1st 2000. 
""" check_type(dt.date, min_value, "min_value") check_type(dt.date, max_value, "max_value") check_valid_interval(min_value, max_value, "min_value", "max_value") if min_value == max_value: return just(min_value) return DateStrategy(min_value, max_value) @defines_strategy_with_reusable_values def times(min_value=dt.time.min, max_value=dt.time.max, timezones=none()): # type: (dt.time, dt.time, SearchStrategy) -> SearchStrategy[dt.time] """A strategy for times between ``min_value`` and ``max_value``. The ``timezones`` argument is handled as for :py:func:`datetimes`. Examples from this strategy shrink towards midnight, with the timezone component shrinking as for the strategy that provided it. """ check_type(dt.time, min_value, "min_value") check_type(dt.time, max_value, "max_value") if min_value.tzinfo is not None: raise InvalidArgument("min_value=%r must not have tzinfo" % min_value) if max_value.tzinfo is not None: raise InvalidArgument("max_value=%r must not have tzinfo" % max_value) check_valid_interval(min_value, max_value, "min_value", "max_value") day = dt.date(2000, 1, 1) return datetimes( min_value=dt.datetime.combine(day, min_value), max_value=dt.datetime.combine(day, max_value), timezones=timezones, ).map(lambda t: t.timetz()) @defines_strategy_with_reusable_values def timedeltas(min_value=dt.timedelta.min, max_value=dt.timedelta.max): # type: (dt.timedelta, dt.timedelta) -> SearchStrategy[dt.timedelta] """A strategy for timedeltas between ``min_value`` and ``max_value``. Examples from this strategy shrink towards zero. 
""" check_type(dt.timedelta, min_value, "min_value") check_type(dt.timedelta, max_value, "max_value") check_valid_interval(min_value, max_value, "min_value", "max_value") if min_value == max_value: return just(min_value) return TimedeltaStrategy(min_value=min_value, max_value=max_value) class CompositeStrategy(SearchStrategy): def __init__(self, definition, label, args, kwargs): self.definition = definition self.__label = label self.args = args self.kwargs = kwargs def do_draw(self, data): return self.definition(data.draw, *self.args, **self.kwargs) def calc_label(self): return self.__label @cacheable def composite(f): # type: (Callable[..., Ex]) -> Callable[..., SearchStrategy[Ex]] """Defines a strategy that is built out of potentially arbitrarily many other strategies. This is intended to be used as a decorator. See :ref:`the full documentation for more details ` about how to use this function. Examples from this strategy shrink by shrinking the output of each draw call. """ argspec = getfullargspec(f) if argspec.defaults is not None and len(argspec.defaults) == len(argspec.args): raise InvalidArgument("A default value for initial argument will never be used") if len(argspec.args) == 0 and not argspec.varargs: raise InvalidArgument( "Functions wrapped with composite must take at least one " "positional argument." 
) annots = { k: v for k, v in argspec.annotations.items() if k in (argspec.args + argspec.kwonlyargs + ["return"]) } new_argspec = argspec._replace(args=argspec.args[1:], annotations=annots) label = calc_label_from_cls(f) @defines_strategy @define_function_signature(f.__name__, f.__doc__, new_argspec) def accept(*args, **kwargs): return CompositeStrategy(f, label, args, kwargs) accept.__module__ = f.__module__ return accept @defines_strategy_with_reusable_values @cacheable def complex_numbers( min_magnitude=0, max_magnitude=None, allow_infinity=None, allow_nan=None ): # type: (Optional[Real], Real, bool, bool) -> SearchStrategy[complex] """Returns a strategy that generates complex numbers. This strategy draws complex numbers with constrained magnitudes. The ``min_magnitude`` and ``max_magnitude`` parameters should be non-negative :class:`~python:numbers.Real` numbers; values of ``None`` correspond to zero and infinite values respectively. If ``min_magnitude`` is positive or ``max_magnitude`` is finite, it is an error to enable ``allow_nan``. If ``max_magnitude`` is finite, it is an error to enable ``allow_infinity``. The magnitude contraints are respected up to a relative error of (around) floating-point epsilon, due to implementation via the system ``sqrt`` function. Examples from this strategy shrink by shrinking their real and imaginary parts, as :func:`~hypothesis.strategies.floats`. If you need to generate complex numbers with particular real and imaginary parts or relationships between parts, consider using :func:`builds(complex, ...) ` or :func:`@composite ` respectively. 
""" check_valid_magnitude(min_magnitude, "min_magnitude") check_valid_magnitude(max_magnitude, "max_magnitude") check_valid_interval(min_magnitude, max_magnitude, "min_magnitude", "max_magnitude") if max_magnitude == float("inf"): max_magnitude = None if min_magnitude == 0: min_magnitude = None if allow_infinity is None: allow_infinity = bool(max_magnitude is None) elif allow_infinity and max_magnitude is not None: raise InvalidArgument( "Cannot have allow_infinity=%r with max_magnitude=%r" % (allow_infinity, max_magnitude) ) if allow_nan is None: allow_nan = bool(min_magnitude is None and max_magnitude is None) elif allow_nan and not (min_magnitude is None and max_magnitude is None): raise InvalidArgument( "Cannot have allow_nan=%r, min_magnitude=%r max_magnitude=%r" % (allow_nan, min_magnitude, max_magnitude) ) allow_kw = dict(allow_nan=allow_nan, allow_infinity=allow_infinity) if min_magnitude is None and max_magnitude is None: # In this simple but common case, there are no constraints on the # magnitude and therefore no relationship between the real and # imaginary parts. return builds(complex, floats(**allow_kw), floats(**allow_kw)) @composite def constrained_complex(draw): # Draw the imaginary part, and determine the maximum real part given # this and the max_magnitude if max_magnitude is None: zi = draw(floats(**allow_kw)) rmax = None else: zi = draw(floats(-max_magnitude, max_magnitude, **allow_kw)) rmax = cathetus(max_magnitude, zi) # Draw the real part from the allowed range given the imaginary part if min_magnitude is None or math.fabs(zi) >= min_magnitude: zr = draw(floats(None if rmax is None else -rmax, rmax, **allow_kw)) else: zr = draw(floats(cathetus(min_magnitude, zi), rmax, **allow_kw)) # Order of conditions carefully tuned so that for a given pair of # magnitude arguments, we always either draw or do not draw the bool # (crucial for good shrinking behaviour) but only invert when needed. 
if ( min_magnitude is not None and draw(booleans()) and math.fabs(zi) <= min_magnitude ): zr = -zr return complex(zr, zi) return constrained_complex() def shared(base, key=None): # type: (SearchStrategy[Ex], Any) -> SearchStrategy[Ex] """Returns a strategy that draws a single shared value per run, drawn from base. Any two shared instances with the same key will share the same value, otherwise the identity of this strategy will be used. That is: >>> s = integers() # or any other strategy >>> x = shared(s) >>> y = shared(s) In the above x and y may draw different (or potentially the same) values. In the following they will always draw the same: >>> x = shared(s, key="hi") >>> y = shared(s, key="hi") Examples from this strategy shrink as per their base strategy. """ return SharedStrategy(base, key) @cacheable @defines_strategy_with_reusable_values def uuids(version=None): # type: (int) -> SearchStrategy[UUID] """Returns a strategy that generates :class:`UUIDs `. If the optional version argument is given, value is passed through to :class:`~python:uuid.UUID` and only UUIDs of that version will be generated. All returned values from this will be unique, so e.g. if you do ``lists(uuids())`` the resulting list will never contain duplicates. Examples from this strategy don't have any meaningful shrink order. """ if version not in (None, 1, 2, 3, 4, 5): raise InvalidArgument( ( "version=%r, but version must be in (None, 1, 2, 3, 4, 5) " "to pass to the uuid.UUID constructor." ) % (version,) ) return shared(randoms(), key="hypothesis.strategies.uuids.generator").map( lambda r: UUID(version=version, int=r.getrandbits(128)) ) class RunnerStrategy(SearchStrategy): def __init__(self, default): self.default = default def do_draw(self, data): runner = getattr(data, "hypothesis_runner", not_set) if runner is not_set: if self.default is not_set: raise InvalidArgument( "Cannot use runner() strategy with no " "associated runner or explicit default." 
) else: return self.default else: return runner @defines_strategy_with_reusable_values def runner(default=not_set): """A strategy for getting "the current test runner", whatever that may be. The exact meaning depends on the entry point, but it will usually be the associated 'self' value for it. If there is no current test runner and a default is provided, return that default. If no default is provided, raises InvalidArgument. Examples from this strategy do not shrink (because there is only one). """ return RunnerStrategy(default) class DataObject(object): """This type only exists so that you can write type hints for tests using the :func:`~hypothesis.strategies.data` strategy. Do not use it directly! """ # Note that "only exists" here really means "is only exported to users", # but we want to treat it as "semi-stable", not document it as "public API". def __init__(self, data): self.count = 0 self.conjecture_data = data def __repr__(self): return "data(...)" def draw(self, strategy, label=None): # type: (SearchStrategy[Ex], Any) -> Ex result = self.conjecture_data.draw(strategy) self.count += 1 if label is not None: note("Draw %d (%s): %r" % (self.count, label, result)) else: note("Draw %d: %r" % (self.count, result)) return result class DataStrategy(SearchStrategy): supports_find = False def do_draw(self, data): data.can_reproduce_example_from_repr = False if not hasattr(data, "hypothesis_shared_data_strategy"): data.hypothesis_shared_data_strategy = DataObject(data) return data.hypothesis_shared_data_strategy def __repr__(self): return "data()" def map(self, f): self.__not_a_first_class_strategy("map") def filter(self, f): self.__not_a_first_class_strategy("filter") def flatmap(self, f): self.__not_a_first_class_strategy("flatmap") def example(self): self.__not_a_first_class_strategy("example") def __not_a_first_class_strategy(self, name): raise InvalidArgument( "Cannot call %s on a DataStrategy. 
You should probably be using " "@composite for whatever it is you're trying to do." % (name,) ) @cacheable def data(): # type: () -> SearchStrategy[DataObject] """This isn't really a normal strategy, but instead gives you an object which can be used to draw data interactively from other strategies. See :ref:`the rest of the documentation ` for more complete information. Examples from this strategy do not shrink (because there is only one), but the result of calls to each draw() call shrink as they normally would. """ return DataStrategy() def register_type_strategy( custom_type, # type: type strategy, # type: Union[SearchStrategy, Callable[[type], SearchStrategy]] ): # type: (...) -> None """Add an entry to the global type-to-strategy lookup. This lookup is used in :func:`~hypothesis.strategies.builds` and :func:`@given `. :func:`~hypothesis.strategies.builds` will be used automatically for classes with type annotations on ``__init__`` , so you only need to register a strategy if one or more arguments need to be more tightly defined than their type-based default, or if you want to supply a strategy for an argument with a default value. ``strategy`` may be a search strategy, or a function that takes a type and returns a strategy (useful for generic types). """ # TODO: We would like to move this to the top level, but pending some major # refactoring it's hard to do without creating circular imports. 
from hypothesis.searchstrategy import types if not types.is_a_type(custom_type): raise InvalidArgument("custom_type=%r must be a type") elif not (isinstance(strategy, SearchStrategy) or callable(strategy)): raise InvalidArgument( "strategy=%r must be a SearchStrategy, or a function that takes " "a generic type and returns a specific SearchStrategy" ) elif isinstance(strategy, SearchStrategy) and strategy.is_empty: raise InvalidArgument("strategy=%r must not be empty") types._global_type_lookup[custom_type] = strategy from_type.__clear_cache() # type: ignore @cacheable def deferred(definition): # type: (Callable[[], SearchStrategy[Ex]]) -> SearchStrategy[Ex] """A deferred strategy allows you to write a strategy that references other strategies that have not yet been defined. This allows for the easy definition of recursive and mutually recursive strategies. The definition argument should be a zero-argument function that returns a strategy. It will be evaluated the first time the strategy is used to produce an example. Example usage: >>> import hypothesis.strategies as st >>> x = st.deferred(lambda: st.booleans() | st.tuples(x, x)) >>> x.example() (((False, (True, True)), (False, True)), (True, True)) >>> x.example() True Mutual recursion also works fine: >>> a = st.deferred(lambda: st.booleans() | b) >>> b = st.deferred(lambda: st.tuples(a, a)) >>> a.example() True >>> b.example() (False, (False, ((False, True), False))) Examples from this strategy shrink as they normally would from the strategy returned by the definition. """ return DeferredStrategy(definition) @defines_strategy_with_reusable_values def emails(): """A strategy for generating email addresses as unicode strings. The address format is specified in :rfc:`5322#section-3.4.1`. Values shrink towards shorter local-parts and host domains. This strategy is useful for generating "user data" for tests, as mishandling of email addresses is a common source of bugs. 
""" from hypothesis.provisional import domains local_chars = string.ascii_letters + string.digits + "!#$%&'*+-/=^_`{|}~" local_part = text(local_chars, min_size=1, max_size=64) # TODO: include dot-atoms, quoted strings, escaped chars, etc in local part return builds(u"{}@{}".format, local_part, domains()).filter( lambda addr: len(addr) <= 254 ) # Mypy can't yet handle default values with generic types or typevars, but the # @overload workaround from https://github.com/python/mypy/issues/3737 doesn't # work with @composite functions - Mypy can't see that the function implements # `(Any, Callable[..., T], SearchStrategy[T]) -> Callable[..., T]` @defines_strategy def functions(like=lambda: None, returns=none()): """A strategy for functions, which can be used in callbacks. The generated functions will mimic the interface of ``like``, which must be a callable (including a class, method, or function). The return value for the function is drawn from the ``returns`` argument, which must be a strategy. Note that the generated functions do not validate their arguments, and may return a different value if called again with the same arguments. Generated functions can only be called within the scope of the ``@given`` which created them. This strategy does not support ``.example()``. """ check_type(SearchStrategy, returns) if not callable(like): raise InvalidArgument( "The first argument to functions() must be a callable to imitate, " "but got non-callable like=%r" % (nicerepr(like),) ) return FunctionStrategy(like, returns) @composite def slices(draw, size): """Generates slices that will select indices up to the supplied size Generated slices will have start and stop indices that range from 0 to size - 1 and will step in the appropriate direction. Slices should only produce an empty selection if the start and end are the same. 
Examples from this strategy shrink toward 0 and smaller values """ check_valid_integer(size) if size is None or size < 1: raise InvalidArgument("size=%r must be at least one" % size) min_start = min_stop = 0 max_start = max_stop = size min_step = 1 # For slices start is inclusive and stop is exclusive start = draw(integers(min_start, max_start) | none()) stop = draw(integers(min_stop, max_stop) | none()) # Limit step size to be reasonable if start is None and stop is None: max_step = size elif start is None: max_step = stop elif stop is None: max_step = start else: max_step = abs(start - stop) step = draw(integers(min_step, max_step or 1)) if (stop or 0) < (start or 0): step *= -1 return slice(start, stop, step) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/configuration.py000066400000000000000000000031021354103617500314470ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os __hypothesis_home_directory_default = os.path.join(os.getcwd(), ".hypothesis") __hypothesis_home_directory = None def set_hypothesis_home_dir(directory): global __hypothesis_home_directory __hypothesis_home_directory = directory def mkdir_p(path): try: os.makedirs(path) except OSError: pass def hypothesis_home_dir(): global __hypothesis_home_directory if not __hypothesis_home_directory: __hypothesis_home_directory = os.getenv("HYPOTHESIS_STORAGE_DIRECTORY") if not __hypothesis_home_directory: __hypothesis_home_directory = __hypothesis_home_directory_default mkdir_p(__hypothesis_home_directory) return __hypothesis_home_directory def storage_directory(*names): path = os.path.join(hypothesis_home_dir(), *names) mkdir_p(path) return path def tmpdir(): return storage_directory("tmp") hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/control.py000066400000000000000000000077361354103617500303010ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import traceback from hypothesis import Verbosity, settings from hypothesis.errors import CleanupFailed, InvalidArgument, UnsatisfiedAssumption from hypothesis.reporting import report from hypothesis.utils.dynamicvariables import DynamicVariable if False: from typing import Any, AnyStr # noqa def reject(): raise UnsatisfiedAssumption() def assume(condition): # type: (Any) -> bool """Calling ``assume`` is like an :ref:`assert ` that marks the example as bad, rather than failing the test. This allows you to specify properties that you *assume* will be true, and let Hypothesis try to avoid similar examples in future. """ if not condition: raise UnsatisfiedAssumption() return True _current_build_context = DynamicVariable(None) def current_build_context(): context = _current_build_context.value if context is None: raise InvalidArgument(u"No build context registered") return context class BuildContext(object): def __init__(self, data, is_final=False, close_on_capture=True): self.data = data self.tasks = [] self.is_final = is_final self.close_on_capture = close_on_capture self.close_on_del = False self.notes = [] def __enter__(self): self.assign_variable = _current_build_context.with_value(self) self.assign_variable.__enter__() return self def __exit__(self, exc_type, exc_value, tb): self.assign_variable.__exit__(exc_type, exc_value, tb) if self.close() and exc_type is None: raise CleanupFailed() def close(self): any_failed = False for task in self.tasks: try: task() except BaseException: any_failed = True report(traceback.format_exc()) return any_failed def cleanup(teardown): """Register a function to be called when the current test has finished executing. Any exceptions thrown in teardown will be printed but not rethrown. Inside a test this isn't very interesting, because you can just use a finally block, but note that you can use this inside map, flatmap, etc. in order to e.g. 
insist that a value is closed at the end. """ context = _current_build_context.value if context is None: raise InvalidArgument(u"Cannot register cleanup outside of build context") context.tasks.append(teardown) def note(value): # type: (AnyStr) -> None """Report this value in the final execution.""" context = _current_build_context.value if context is None: raise InvalidArgument("Cannot make notes outside of a test") context.notes.append(value) if context.is_final or settings.default.verbosity >= Verbosity.verbose: report(value) def event(value): # type: (AnyStr) -> None """Record an event that occurred this test. Statistics on number of test runs with each event will be reported at the end if you run Hypothesis in statistics reporting mode. Events should be strings or convertible to them. """ context = _current_build_context.value if context is None: raise InvalidArgument("Cannot make record events outside of a test") if context.data is not None: context.data.note_event(value) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/core.py000066400000000000000000001206261354103617500275430ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER """This module provides the core primitives of Hypothesis, such as given.""" from __future__ import absolute_import, division, print_function import ast import base64 import contextlib import datetime import inspect import os import random as rnd_module import traceback import warnings import zlib from random import Random from unittest import TestCase import attr import hypothesis.strategies as st from hypothesis._settings import ( HealthCheck, Phase, Verbosity, local_settings, note_deprecation, settings as Settings, ) from hypothesis.control import BuildContext from hypothesis.errors import ( DeadlineExceeded, DidNotReproduce, FailedHealthCheck, Flaky, HypothesisDeprecationWarning, HypothesisWarning, InvalidArgument, MultipleFailures, NoSuchExample, Unsatisfiable, UnsatisfiedAssumption, ) from hypothesis.executors import new_style_executor from hypothesis.internal.compat import ( PY2, bad_django_TestCase, benchmark_time, binary_type, get_type_hints, getfullargspec, hbytes, int_from_bytes, qualname, ) from hypothesis.internal.conjecture.data import ConjectureData, StopTest from hypothesis.internal.conjecture.engine import ConjectureRunner, ExitReason, sort_key from hypothesis.internal.entropy import deterministic_PRNG from hypothesis.internal.escalation import ( escalate_hypothesis_internal_error, get_trimmed_traceback, ) from hypothesis.internal.healthcheck import fail_health_check from hypothesis.internal.reflection import ( arg_string, convert_positional_arguments, define_function_signature, function_digest, get_pretty_function_description, impersonate, is_mock, nicerepr, proxies, ) from hypothesis.reporting import current_verbosity, report, verbose_report from hypothesis.searchstrategy.collections import TupleStrategy from hypothesis.searchstrategy.strategies import SearchStrategy from hypothesis.statistics import note_engine_for_statistics from hypothesis.utils.conventions import infer from hypothesis.version import __version__ if False: from 
typing import Any, Dict, Callable, Hashable, Optional, Union, TypeVar # noqa from hypothesis.utils.conventions import InferType # noqa TestFunc = TypeVar("TestFunc", bound=Callable) running_under_pytest = False global_force_seed = None def new_random(): return rnd_module.Random(rnd_module.getrandbits(128)) @attr.s() class Example(object): args = attr.ib() kwargs = attr.ib() def example(*args, **kwargs): # type: (*Any, **Any) -> Callable[[TestFunc], TestFunc] """A decorator which ensures a specific example is always tested.""" if args and kwargs: raise InvalidArgument( "Cannot mix positional and keyword arguments for examples" ) if not (args or kwargs): raise InvalidArgument("An example must provide at least one argument") def accept(test): if not hasattr(test, "hypothesis_explicit_examples"): test.hypothesis_explicit_examples = [] test.hypothesis_explicit_examples.append(Example(tuple(args), kwargs)) return test return accept def seed(seed): # type: (Hashable) -> Callable[[TestFunc], TestFunc] """seed: Start the test execution from a specific seed. May be any hashable object. No exact meaning for seed is provided other than that for a fixed seed value Hypothesis will try the same actions (insofar as it can given external sources of non- determinism. e.g. timing and hash randomization). Overrides the derandomize setting, which is designed to enable deterministic builds rather than reproducing observed failures. """ def accept(test): test._hypothesis_internal_use_seed = seed current_settings = getattr(test, "_hypothesis_internal_use_settings", None) test._hypothesis_internal_use_settings = Settings( current_settings, database=None ) return test return accept def reproduce_failure(version, blob): """Run the example that corresponds to this data blob in order to reproduce a failure. A test with this decorator *always* runs only one example and always fails. 
If the provided example does not cause a failure, or is in some way invalid for this test, then this will fail with a DidNotReproduce error. This decorator is not intended to be a permanent addition to your test suite. It's simply some code you can add to ease reproduction of a problem in the event that you don't have access to the test database. Because of this, *no* compatibility guarantees are made between different versions of Hypothesis - its API may change arbitrarily from version to version. """ def accept(test): test._hypothesis_internal_use_reproduce_failure = (version, blob) return test return accept def encode_failure(buffer): # This needs to be a real bytes() instance, so we use binary_type() # instead of hbytes() here. buffer = binary_type(buffer) compressed = zlib.compress(buffer) if len(compressed) < len(buffer): buffer = b"\1" + compressed else: buffer = b"\0" + buffer return base64.b64encode(buffer) def decode_failure(blob): try: buffer = base64.b64decode(blob) except Exception: raise InvalidArgument("Invalid base64 encoded string: %r" % (blob,)) prefix = buffer[:1] if prefix == b"\0": return buffer[1:] elif prefix == b"\1": try: return zlib.decompress(buffer[1:]) except zlib.error: raise InvalidArgument("Invalid zlib compression for blob %r" % (blob,)) else: raise InvalidArgument( "Could not decode blob %r: Invalid start byte %r" % (blob, prefix) ) class WithRunner(SearchStrategy): def __init__(self, base, runner): assert runner is not None self.base = base self.runner = runner def do_draw(self, data): data.hypothesis_runner = self.runner return self.base.do_draw(data) def is_invalid_test(name, original_argspec, generator_arguments, generator_kwargs): def invalid(message): def wrapped_test(*arguments, **kwargs): raise InvalidArgument(message) wrapped_test.is_hypothesis_test = True return wrapped_test if not (generator_arguments or generator_kwargs): return invalid("given must be called with at least one argument") if generator_arguments and any( 
[original_argspec.varargs, original_argspec.varkw, original_argspec.kwonlyargs] ): return invalid( "positional arguments to @given are not supported with varargs, " "varkeywords, or keyword-only arguments" ) if len(generator_arguments) > len(original_argspec.args): args = tuple(generator_arguments) return invalid( "Too many positional arguments for %s() were passed to @given " "- expected at most %d arguments, but got %d %r" % (name, len(original_argspec.args), len(args), args) ) if infer in generator_arguments: return invalid( "infer was passed as a positional argument to @given, " "but may only be passed as a keyword argument" ) if generator_arguments and generator_kwargs: return invalid("cannot mix positional and keyword arguments to @given") extra_kwargs = [ k for k in generator_kwargs if k not in original_argspec.args + original_argspec.kwonlyargs ] if extra_kwargs and not original_argspec.varkw: arg = extra_kwargs[0] return invalid( "%s() got an unexpected keyword argument %r, from `%s=%r` in @given" % (name, arg, arg, generator_kwargs[arg]) ) for a in original_argspec.args: if isinstance(a, list): # pragma: no cover return invalid( "Cannot decorate function %s() because it has destructuring arguments" % (name,) ) if original_argspec.defaults or original_argspec.kwonlydefaults: return invalid("Cannot apply @given to a function with defaults.") missing = [ repr(kw) for kw in original_argspec.kwonlyargs if kw not in generator_kwargs ] if missing: raise InvalidArgument( "Missing required kwarg{}: {}".format( "s" if len(missing) > 1 else "", ", ".join(missing) ) ) def execute_explicit_examples( test_runner, test, wrapped_test, settings, arguments, kwargs ): original_argspec = getfullargspec(test) for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())): example_kwargs = dict(original_argspec.kwonlydefaults or {}) if example.args: if len(example.args) > len(original_argspec.args): raise InvalidArgument( "example has too many arguments for 
test. " "Expected at most %d but got %d" % (len(original_argspec.args), len(example.args)) ) example_kwargs.update( dict(zip(original_argspec.args[-len(example.args) :], example.args)) ) else: example_kwargs.update(example.kwargs) if Phase.explicit not in settings.phases: continue example_kwargs.update(kwargs) # Note: Test may mutate arguments and we can't rerun explicit # examples, so we have to calculate the failure message at this # point rather than than later. example_string = "%s(%s)" % ( test.__name__, arg_string(test, arguments, example_kwargs), ) with local_settings(settings): try: with BuildContext(None) as b: verbose_report("Trying example: " + example_string) test_runner(None, lambda data: test(*arguments, **example_kwargs)) except BaseException: report("Falsifying example: " + example_string) for n in b.notes: report(n) raise def get_random_for_wrapped_test(test, wrapped_test): settings = wrapped_test._hypothesis_internal_use_settings wrapped_test._hypothesis_internal_use_generated_seed = None if wrapped_test._hypothesis_internal_use_seed is not None: return Random(wrapped_test._hypothesis_internal_use_seed) elif settings.derandomize: return Random(int_from_bytes(function_digest(test))) elif global_force_seed is not None: return Random(global_force_seed) else: seed = rnd_module.getrandbits(128) wrapped_test._hypothesis_internal_use_generated_seed = seed return Random(seed) def process_arguments_to_given( wrapped_test, arguments, kwargs, generator_arguments, generator_kwargs, argspec, test, settings, ): selfy = None arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs) # If the test function is a method of some kind, the bound object # will be the first named argument if there are any, otherwise the # first vararg (if any). if argspec.args: selfy = kwargs.get(argspec.args[0]) elif arguments: selfy = arguments[0] # Ensure that we don't mistake mocks for self here. # This can cause the mock to be used as the test runner. 
if is_mock(selfy): selfy = None test_runner = new_style_executor(selfy) arguments = tuple(arguments) # We use TupleStrategy over tuples() here to avoid polluting # st.STRATEGY_CACHE with references (see #493), and because this is # trivial anyway if the fixed_dictionaries strategy is cacheable. search_strategy = TupleStrategy( ( st.just(arguments), st.fixed_dictionaries(generator_kwargs).map( lambda args: dict(args, **kwargs) ), ) ) if selfy is not None: search_strategy = WithRunner(search_strategy, selfy) search_strategy.validate() return arguments, kwargs, test_runner, search_strategy def run_once(fn): """Wraps a no-args function so that its outcome is cached. We use this for calculating various lists of exceptions the first time we use them.""" result = [None] def run(): if result[0] is None: result[0] = fn() assert result[0] is not None return result[0] run.__name__ = fn.__name__ return run @run_once def skip_exceptions_to_reraise(): """Return a tuple of exceptions meaning 'skip this test', to re-raise. This is intended to cover most common test runners; if you would like another to be added please open an issue or pull request. """ import unittest # This is a set because nose may simply re-export unittest.SkipTest exceptions = {unittest.SkipTest} try: # pragma: no cover from unittest2 import SkipTest exceptions.add(SkipTest) except ImportError: pass try: # pragma: no cover from pytest.runner import Skipped exceptions.add(Skipped) except ImportError: pass try: # pragma: no cover from nose import SkipTest as NoseSkipTest exceptions.add(NoseSkipTest) except ImportError: pass return tuple(sorted(exceptions, key=str)) @run_once def failure_exceptions_to_catch(): """Return a tuple of exceptions meaning 'this test has failed', to catch. This is intended to cover most common test runners; if you would like another to be added please open an issue or pull request. 
""" exceptions = [Exception] try: # pragma: no cover from _pytest.outcomes import Failed exceptions.append(Failed) except ImportError: pass return tuple(exceptions) def new_given_argspec(original_argspec, generator_kwargs): """Make an updated argspec for the wrapped test.""" new_args = [a for a in original_argspec.args if a not in generator_kwargs] new_kwonlyargs = [ a for a in original_argspec.kwonlyargs if a not in generator_kwargs ] annots = { k: v for k, v in original_argspec.annotations.items() if k in new_args + new_kwonlyargs } annots["return"] = None return original_argspec._replace( args=new_args, kwonlyargs=new_kwonlyargs, annotations=annots ) ROOT = os.path.dirname(__file__) STDLIB = os.path.dirname(os.__file__) class StateForActualGivenExecution(object): def __init__(self, test_runner, search_strategy, test, settings, random, had_seed): self.test_runner = test_runner self.search_strategy = search_strategy self.settings = settings self.last_exception = None self.falsifying_examples = () self.__was_flaky = False self.random = random self.__warned_deadline = False self.__test_runtime = None self.__had_seed = had_seed self.test = test self.files_to_propagate = set() self.failed_normally = False self.used_examples_from_database = False def execute(self, data, print_example=False, is_final=False, expected_failure=None): text_repr = [None] if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = benchmark_time() result = self.test(*args, **kwargs) finish = benchmark_time() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def 
run(data): if not hasattr(data, "can_reproduce_example_from_repr"): data.can_reproduce_example_from_repr = True with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final): args, kwargs = data.draw(self.search_strategy) if expected_failure is not None: text_repr[0] = arg_string(test, args, kwargs) if print_example: example = "%s(%s)" % ( test.__name__, arg_string(test, args, kwargs), ) try: ast.parse(example) except SyntaxError: data.can_reproduce_example_from_repr = False report("Falsifying example: %s" % (example,)) elif current_verbosity() >= Verbosity.verbose: report( lambda: "Trying example: %s(%s)" % (test.__name__, arg_string(test, args, kwargs)) ) return test(*args, **kwargs) result = self.test_runner(data, run) if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( ( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." ) % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) self.__flaky( ( "Hypothesis %s(%s) produces unreliable results: Falsified" " on the first call but did not on a subsequent one" ) % (test.__name__, text_repr[0]) ) return result def evaluate_test_data(self, data): try: result = self.execute(data) if result is not None: fail_health_check( self.settings, ( "Tests run under @given should return None, but " "%s returned %r instead." 
) % (self.test.__name__, result), HealthCheck.return_value, ) except UnsatisfiedAssumption: data.mark_invalid() except ( HypothesisDeprecationWarning, FailedHealthCheck, StopTest, ) + skip_exceptions_to_reraise(): raise except failure_exceptions_to_catch() as e: escalate_hypothesis_internal_error() if data.frozen: # This can happen if an error occurred in a finally # block somewhere, suppressing our original StopTest. # We raise a new one here to resume normal operation. raise StopTest(data.testcounter) else: tb = get_trimmed_traceback() info = data.extra_information info.__expected_traceback = "".join( traceback.format_exception(type(e), e, tb) ) info.__expected_exception = e verbose_report(info.__expected_traceback) origin = traceback.extract_tb(tb)[-1] filename = origin[0] lineno = origin[1] data.mark_interesting((type(e), filename, lineno)) def run(self): # Tell pytest to omit the body of this function from tracebacks __tracebackhide__ = True if global_force_seed is None: database_key = function_digest(self.test) else: database_key = None runner = ConjectureRunner( self.evaluate_test_data, settings=self.settings, random=self.random, database_key=database_key, ) try: runner.run() finally: self.used_examples_from_database = runner.used_examples_from_database note_engine_for_statistics(runner) self.used_examples_from_database = runner.used_examples_from_database if runner.call_count == 0: return if runner.interesting_examples: self.falsifying_examples = sorted( [d for d in runner.interesting_examples.values()], key=lambda d: sort_key(d.buffer), reverse=True, ) else: if runner.valid_examples == 0: raise Unsatisfiable( "Unable to satisfy assumptions of hypothesis %s." 
% (get_pretty_function_description(self.test),) ) if not self.falsifying_examples: return elif not self.settings.report_multiple_bugs: del self.falsifying_examples[:-1] self.failed_normally = True flaky = 0 for falsifying_example in self.falsifying_examples: info = falsifying_example.extra_information ran_example = ConjectureData.for_buffer(falsifying_example.buffer) self.__was_flaky = False assert info.__expected_exception is not None try: self.execute( ran_example, print_example=True, is_final=True, expected_failure=( info.__expected_exception, info.__expected_traceback, ), ) except (UnsatisfiedAssumption, StopTest): report(traceback.format_exc()) self.__flaky( "Unreliable assumption: An example which satisfied " "assumptions on the first run now fails it." ) except BaseException as e: if len(self.falsifying_examples) <= 1: raise tb = get_trimmed_traceback() report("".join(traceback.format_exception(type(e), e, tb))) finally: # pragma: no cover # This section is in fact entirely covered by the tests in # test_reproduce_failure, but it seems to trigger a lovely set # of coverage bugs: The branches show up as uncovered (despite # definitely being covered - you can add an assert False else # branch to verify this and see it fail - and additionally the # second branch still complains about lack of coverage even if # you add a pragma: no cover to it! # See https://bitbucket.org/ned/coveragepy/issues/623/ if self.settings.print_blob: report( ( "\nYou can reproduce this example by temporarily " "adding @reproduce_failure(%r, %r) as a decorator " "on your test case" ) % (__version__, encode_failure(falsifying_example.buffer)) ) if self.__was_flaky: flaky += 1 # If we only have one example then we should have raised an error or # flaky prior to this point. assert len(self.falsifying_examples) > 1 if flaky > 0: raise Flaky( ( "Hypothesis found %d distinct failures, but %d of them " "exhibited some sort of flaky behaviour." 
) % (len(self.falsifying_examples), flaky) ) else: raise MultipleFailures( ("Hypothesis found %d distinct failures.") % (len(self.falsifying_examples)) ) def __flaky(self, message): if len(self.falsifying_examples) <= 1: raise Flaky(message) else: self.__was_flaky = True report("Flaky example! " + message) @contextlib.contextmanager def fake_subTest(self, msg=None, **__): """Monkeypatch for `unittest.TestCase.subTest` during `@given`. If we don't patch this out, each failing example is reported as a seperate failing test by the unittest test runner, which is obviously incorrect. We therefore replace it for the duration with this version. """ warnings.warn( "subTest per-example reporting interacts badly with Hypothesis " "trying hundreds of examples, so we disable it for the duration of " "any test that uses `@given`.", HypothesisWarning, stacklevel=2, ) yield @attr.s() class HypothesisHandle(object): """This object is provided as the .hypothesis attribute on @given tests. Downstream users can reassign its attributes to insert custom logic into the execution of each case, for example by converting an async into a sync function. This must be an attribute of an attribute, because reassignment of a first-level attribute would not be visible to Hypothesis if the function had been decorated before the assignment. See https://github.com/HypothesisWorks/hypothesis/issues/1257 for more information. """ inner_test = attr.ib() def given( *given_arguments, # type: Union[SearchStrategy, InferType] **given_kwargs # type: Union[SearchStrategy, InferType] ): # type: (...) -> Callable[[Callable[..., None]], Callable[..., None]] """A decorator for turning a test function that accepts arguments into a randomized test. This is the main entry point to Hypothesis. """ def run_test_with_generator(test): if hasattr(test, "_hypothesis_internal_test_function_without_warning"): # Pull out the original test function to avoid the warning we # stuck in about using @settings without @given. 
test = test._hypothesis_internal_test_function_without_warning if inspect.isclass(test): # Provide a meaningful error to users, instead of exceptions from # internals that assume we're dealing with a function. raise InvalidArgument("@given cannot be applied to a class.") generator_arguments = tuple(given_arguments) generator_kwargs = dict(given_kwargs) original_argspec = getfullargspec(test) check_invalid = is_invalid_test( test.__name__, original_argspec, generator_arguments, generator_kwargs ) if check_invalid is not None: return check_invalid for name, strategy in zip( reversed(original_argspec.args), reversed(generator_arguments) ): generator_kwargs[name] = strategy argspec = new_given_argspec(original_argspec, generator_kwargs) @impersonate(test) @define_function_signature(test.__name__, test.__doc__, argspec) def wrapped_test(*arguments, **kwargs): # Tell pytest to omit the body of this function from tracebacks __tracebackhide__ = True test = wrapped_test.hypothesis.inner_test if getattr(test, "is_hypothesis_test", False): raise InvalidArgument( ( "You have applied @given to the test %s more than once, which " "wraps the test several times and is extremely slow. A " "similar effect can be gained by combining the arguments " "of the two calls to given. 
For example, instead of " "@given(booleans()) @given(integers()), you could write " "@given(booleans(), integers())" ) % (test.__name__,) ) settings = wrapped_test._hypothesis_internal_use_settings random = get_random_for_wrapped_test(test, wrapped_test) if infer in generator_kwargs.values(): hints = get_type_hints(test) for name in [ name for name, value in generator_kwargs.items() if value is infer ]: if name not in hints: raise InvalidArgument( "passed %s=infer for %s, but %s has no type annotation" % (name, test.__name__, name) ) generator_kwargs[name] = st.from_type(hints[name]) processed_args = process_arguments_to_given( wrapped_test, arguments, kwargs, generator_arguments, generator_kwargs, argspec, test, settings, ) arguments, kwargs, test_runner, search_strategy = processed_args runner = getattr(search_strategy, "runner", None) if isinstance(runner, TestCase) and test.__name__ in dir(TestCase): msg = ( "You have applied @given to the method %s, which is " "used by the unittest runner but is not itself a test." " This is not useful in any way." % test.__name__ ) fail_health_check(settings, msg, HealthCheck.not_a_test_method) if bad_django_TestCase(runner): # pragma: no cover # Covered by the Django tests, but not the pytest coverage task raise InvalidArgument( "You have applied @given to a method on %s, but this " "class does not inherit from the supported versions in " "`hypothesis.extra.django`. Use the Hypothesis variants " "to ensure that each example is run in a separate " "database transaction." 
% qualname(type(runner)) ) state = StateForActualGivenExecution( test_runner, search_strategy, test, settings, random, had_seed=wrapped_test._hypothesis_internal_use_seed, ) reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure if reproduce_failure is not None: expected_version, failure = reproduce_failure if expected_version != __version__: raise InvalidArgument( ( "Attempting to reproduce a failure from a different " "version of Hypothesis. This failure is from %s, but " "you are currently running %r. Please change your " "Hypothesis version to a matching one." ) % (expected_version, __version__) ) try: state.execute( ConjectureData.for_buffer(decode_failure(failure)), print_example=True, is_final=True, ) raise DidNotReproduce( "Expected the test to raise an error, but it " "completed successfully." ) except StopTest: raise DidNotReproduce( "The shape of the test data has changed in some way " "from where this blob was defined. Are you sure " "you're running the same test?" ) except UnsatisfiedAssumption: raise DidNotReproduce( "The test data failed to satisfy an assumption in the " "test. Have you added it since this blob was " "generated?" ) execute_explicit_examples( test_runner, test, wrapped_test, settings, arguments, kwargs ) if settings.max_examples <= 0: return if not ( Phase.reuse in settings.phases or Phase.generate in settings.phases ): return try: if isinstance(runner, TestCase) and hasattr(runner, "subTest"): subTest = runner.subTest try: setattr(runner, "subTest", fake_subTest) state.run() finally: setattr(runner, "subTest", subTest) else: state.run() except BaseException as e: generated_seed = wrapped_test._hypothesis_internal_use_generated_seed with local_settings(settings): if not (state.failed_normally or generated_seed is None): if running_under_pytest: report( "You can add @seed(%(seed)d) to this test or " "run pytest with --hypothesis-seed=%(seed)d " "to reproduce this failure." 
% {"seed": generated_seed} ) else: report( "You can add @seed(%d) to this test to " "reproduce this failure." % (generated_seed,) ) # The dance here is to avoid showing users long tracebacks # full of Hypothesis internals they don't care about. # We have to do this inline, to avoid adding another # internal stack frame just when we've removed the rest. if PY2: # Python 2 doesn't have Exception.with_traceback(...); # instead it has a three-argument form of the `raise` # statement. Unfortunately this is a SyntaxError on # Python 3, and before Python 2.7.9 it was *also* a # SyntaxError to use it in a nested function so we # can't `exec` or `eval` our way out (BPO-21591). # So unless we break some versions of Python 2, none # of them get traceback elision. raise # On Python 3, we swap out the real traceback for our # trimmed version. Using a variable ensures that the line # which will actually appear in trackbacks is as clear as # possible - "raise the_error_hypothesis_found". the_error_hypothesis_found = e.with_traceback( get_trimmed_traceback() ) raise the_error_hypothesis_found for attrib in dir(test): if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)): setattr(wrapped_test, attrib, getattr(test, attrib)) wrapped_test.is_hypothesis_test = True if hasattr(test, "_hypothesis_internal_settings_applied"): # Used to check if @settings is applied twice. 
wrapped_test._hypothesis_internal_settings_applied = True wrapped_test._hypothesis_internal_use_seed = getattr( test, "_hypothesis_internal_use_seed", None ) wrapped_test._hypothesis_internal_use_settings = ( getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default ) wrapped_test._hypothesis_internal_use_reproduce_failure = getattr( test, "_hypothesis_internal_use_reproduce_failure", None ) wrapped_test.hypothesis = HypothesisHandle(test) return wrapped_test return run_test_with_generator def find( specifier, # type: SearchStrategy condition, # type: Callable[[Any], bool] settings=None, # type: Settings random=None, # type: Any database_key=None, # type: bytes ): # type: (...) -> Any """Returns the minimal example from the given strategy ``specifier`` that matches the predicate function ``condition``.""" note_deprecation( "`find(s, f)` is deprecated, because it is rarely used but takes " "ongoing work to maintain as we upgrade other parts of Hypothesis.", since="2019-07-11", ) if settings is None: settings = Settings(max_examples=2000) settings = Settings(settings, suppress_health_check=HealthCheck.all()) if database_key is None and settings.database is not None: database_key = function_digest(condition) if not isinstance(specifier, SearchStrategy): raise InvalidArgument( "Expected SearchStrategy but got %r of type %s" % (specifier, type(specifier).__name__) ) specifier.validate() search = specifier random = random or new_random() successful_examples = [0] last_data = [None] last_repr = [None] def template_condition(data): with deterministic_PRNG(): with BuildContext(data): try: data.is_find = True result = data.draw(search) data.note(result) success = condition(result) except UnsatisfiedAssumption: data.mark_invalid() if success: successful_examples[0] += 1 if settings.verbosity >= Verbosity.verbose: if not successful_examples[0]: report(u"Tried non-satisfying example %s" % (nicerepr(result),)) elif success: if successful_examples[0] == 1: 
last_repr[0] = nicerepr(result) report(u"Found satisfying example %s" % (last_repr[0],)) last_data[0] = data elif ( sort_key(hbytes(data.buffer)) < sort_key(last_data[0].buffer) ) and nicerepr(result) != last_repr[0]: last_repr[0] = nicerepr(result) report(u"Shrunk example to %s" % (last_repr[0],)) last_data[0] = data if success and not data.frozen: data.mark_interesting() runner = ConjectureRunner( template_condition, settings=settings, random=random, database_key=database_key ) runner.run() note_engine_for_statistics(runner) if runner.interesting_examples: data = ConjectureData.for_buffer( list(runner.interesting_examples.values())[0].buffer ) with deterministic_PRNG(): with BuildContext(data): return data.draw(search) if runner.valid_examples == 0 and (runner.exit_reason != ExitReason.finished): raise Unsatisfiable( "Unable to satisfy assumptions of %s." % (get_pretty_function_description(condition),) ) raise NoSuchExample(get_pretty_function_description(condition)) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/database.py000066400000000000000000000147271354103617500303630ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import binascii import os import warnings from hashlib import sha1 from hypothesis.configuration import storage_directory from hypothesis.errors import HypothesisException, HypothesisWarning from hypothesis.internal.compat import hbytes from hypothesis.utils.conventions import not_set def _db_for_path(path=None): if path is not_set: if os.getenv("HYPOTHESIS_DATABASE_FILE") is not None: # pragma: no cover raise HypothesisException( "The $HYPOTHESIS_DATABASE_FILE environment variable no longer has any " "effect. Configure your database location via a settings profile instead.\n" "https://hypothesis.readthedocs.io/en/latest/settings.html#settings-profiles" ) # Note: storage_directory attempts to create the dir in question, so # if os.access fails there *must* be a fatal permissions issue. path = storage_directory("examples") if os.access(path, os.R_OK | os.W_OK | os.X_OK): return _db_for_path(path) else: # pragma: no cover warnings.warn( HypothesisWarning( "The database setting is not configured, and the default " "location is unusable - falling back to an in-memory " "database for this session. path=%r" % (path,) ) ) return InMemoryExampleDatabase() if path in (None, ":memory:"): return InMemoryExampleDatabase() return DirectoryBasedExampleDatabase(str(path)) class EDMeta(type): def __call__(self, *args, **kwargs): if self is ExampleDatabase: return _db_for_path(*args, **kwargs) return super(EDMeta, self).__call__(*args, **kwargs) class ExampleDatabase(EDMeta("ExampleDatabase", (object,), {})): # type: ignore """Interface class for storage systems. A key -> multiple distinct values mapping. Keys and values are binary data. """ def save(self, key, value): """Save ``value`` under ``key``. If this value is already present for this key, silently do nothing """ raise NotImplementedError("%s.save" % (type(self).__name__)) def delete(self, key, value): """Remove this value from this key. 
        If this value is not present, silently do nothing.
        """
        raise NotImplementedError("%s.delete" % (type(self).__name__))

    def move(self, src, dest, value):
        """Move value from key src to key dest. Equivalent to delete(src, value)
        followed by save(dest, value) but may have a more efficient
        implementation.

        Note that value will be inserted at dest regardless of whether it is
        currently present at src.
        """
        if src == dest:
            # Nothing to move; just make sure the value is present.
            self.save(src, value)
            return
        self.delete(src, value)
        self.save(dest, value)

    def fetch(self, key):
        """Return all values matching this key."""
        raise NotImplementedError("%s.fetch" % (type(self).__name__))

    def close(self):
        """Clear up any resources associated with this database."""
        raise NotImplementedError("%s.close" % (type(self).__name__))


class InMemoryExampleDatabase(ExampleDatabase):
    """Non-persistent database backed by a plain dict of sets."""

    def __init__(self):
        # Maps key -> set of hbytes values.
        self.data = {}

    def __repr__(self):
        return "InMemoryExampleDatabase(%r)" % (self.data,)

    def fetch(self, key):
        for v in self.data.get(key, ()):
            yield v

    def save(self, key, value):
        # Normalise to hbytes so equal values always compare/hash equal.
        self.data.setdefault(key, set()).add(hbytes(value))

    def delete(self, key, value):
        self.data.get(key, set()).discard(hbytes(value))

    def close(self):
        pass


def mkdirp(path):
    # Best-effort recursive mkdir: an already-existing directory (or any
    # other OSError) is deliberately ignored; callers treat this as advisory.
    try:
        os.makedirs(path)
    except OSError:
        pass
    return path


def _hash(key):
    # Short, stable, filesystem-safe name derived from arbitrary bytes.
    return sha1(key).hexdigest()[:16]


class DirectoryBasedExampleDatabase(ExampleDatabase):
    """Persistent database storing each value as a file under a per-key
    directory named by a hash of the key."""

    def __init__(self, path):
        self.path = path
        # Cache of key -> directory path, so we only hash/mkdir once per key.
        self.keypaths = {}

    def __repr__(self):
        return "DirectoryBasedExampleDatabase(%r)" % (self.path,)

    def close(self):
        pass

    def _key_path(self, key):
        # Return (creating if needed) the directory for this key.
        try:
            return self.keypaths[key]
        except KeyError:
            pass
        directory = os.path.join(self.path, _hash(key))
        mkdirp(directory)
        self.keypaths[key] = directory
        return directory

    def _value_path(self, key, value):
        # File path for a specific value: hash of value inside the key dir.
        return os.path.join(self._key_path(key), sha1(value).hexdigest()[:16])

    def fetch(self, key):
        kp = self._key_path(key)
        for path in os.listdir(kp):
            try:
                with open(os.path.join(kp, path), "rb") as i:
                    yield hbytes(i.read())
            except EnvironmentError:
                # File may have been deleted concurrently; skip it.
                pass

    def save(self, key,
value): path = self._value_path(key, value) if not os.path.exists(path): suffix = binascii.hexlify(os.urandom(16)) if not isinstance(suffix, str): # pragma: no branch # On Python 3, binascii.hexlify returns bytes suffix = suffix.decode("ascii") tmpname = path + "." + suffix with open(tmpname, "wb") as o: o.write(value) try: os.rename(tmpname, path) except OSError: # pragma: no cover os.unlink(tmpname) assert not os.path.exists(tmpname) def move(self, src, dest, value): if src == dest: self.save(src, value) return try: os.rename(self._value_path(src, value), self._value_path(dest, value)) except OSError: self.delete(src, value) self.save(dest, value) def delete(self, key, value): try: os.unlink(self._value_path(key, value)) except OSError: pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/errors.py000066400000000000000000000127331354103617500301260ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function class HypothesisException(Exception): """Generic parent class for exceptions thrown by Hypothesis.""" class CleanupFailed(HypothesisException): """At least one cleanup task failed and no other exception was raised.""" class UnsatisfiedAssumption(HypothesisException): """An internal error raised by assume. If you're seeing this error something has gone wrong. 
""" class NoSuchExample(HypothesisException): """The condition we have been asked to satisfy appears to be always false. This does not guarantee that no example exists, only that we were unable to find one. """ def __init__(self, condition_string, extra=""): super(NoSuchExample, self).__init__( "No examples found of condition %s%s" % (condition_string, extra) ) class Unsatisfiable(HypothesisException): """We ran out of time or examples before we could find enough examples which satisfy the assumptions of this hypothesis. This could be because the function is too slow. If so, try upping the timeout. It could also be because the function is using assume in a way that is too hard to satisfy. If so, try writing a custom strategy or using a better starting point (e.g if you are requiring a list has unique values you could instead filter out all duplicate values from the list) """ class Flaky(HypothesisException): """This function appears to fail non-deterministically: We have seen it fail when passed this example at least once, but a subsequent invocation did not fail. Common causes for this problem are: 1. The function depends on external state. e.g. it uses an external random number generator. Try to make a version that passes all the relevant state in from Hypothesis. 2. The function is suffering from too much recursion and its failure depends sensitively on where it's been called from. 3. The function is timing sensitive and can fail or pass depending on how long it takes. Try breaking it up into smaller functions which don't do that and testing those instead. """ class InvalidArgument(HypothesisException, TypeError): """Used to indicate that the arguments to a Hypothesis function were in some manner incorrect.""" class ResolutionFailed(InvalidArgument): """Hypothesis had to resolve a type to a strategy, but this failed. 
Type inference is best-effort, so this only happens when an annotation exists but could not be resolved for a required argument to the target of ``builds()``, or where the user passed ``infer``. """ class InvalidState(HypothesisException): """The system is not in a state where you were allowed to do that.""" class InvalidDefinition(HypothesisException, TypeError): """Used to indicate that a class definition was not well put together and has something wrong with it.""" class HypothesisWarning(HypothesisException, Warning): """A generic warning issued by Hypothesis.""" class FailedHealthCheck(HypothesisWarning): """Raised when a test fails a preliminary healthcheck that occurs before execution.""" def __init__(self, message, check): super(FailedHealthCheck, self).__init__(message) self.health_check = check class HypothesisDeprecationWarning(HypothesisWarning, FutureWarning): """A deprecation warning issued by Hypothesis. Actually inherits from FutureWarning, because DeprecationWarning is hidden by the default warnings filter. You can configure the Python :mod:`python:warnings` to handle these warnings differently to others, either turning them into errors or suppressing them entirely. Obviously we would prefer the former! 
""" class Frozen(HypothesisException): """Raised when a mutation method has been called on a ConjectureData object after freeze() has been called.""" class MultipleFailures(HypothesisException): """Indicates that Hypothesis found more than one distinct bug when testing your code.""" class DeadlineExceeded(HypothesisException): """Raised when an individual test body has taken too long to run.""" def __init__(self, runtime, deadline): super(DeadlineExceeded, self).__init__( "Test took %.2fms, which exceeds the deadline of %.2fms" % (runtime.total_seconds() * 1000, deadline.total_seconds() * 1000) ) self.runtime = runtime self.deadline = deadline class StopTest(BaseException): """Raised when a test should stop running and return control to the Hypothesis engine, which should then continue normally. """ def __init__(self, testcounter): super(StopTest, self).__init__(repr(testcounter)) self.testcounter = testcounter class DidNotReproduce(HypothesisException): pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/executors.py000066400000000000000000000041171354103617500306300ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
#
# END HEADER

from __future__ import absolute_import, division, print_function


def default_executor(function):  # pragma: nocover
    # Legacy sentinel executor, kept only so new_style_executor can
    # recognise it by identity below.
    raise NotImplementedError()  # We don't actually use this any more


def setup_teardown_executor(setup, teardown):
    """Build an old-style executor that brackets every call with the given
    setup/teardown hooks (either of which may be None)."""
    setup = setup or (lambda: None)
    teardown = teardown or (lambda ex: None)

    def execute(function):
        # teardown always receives whatever setup returned (None if setup
        # itself raised before producing a token).
        token = None
        try:
            token = setup()
            return function()
        finally:
            teardown(token)

    return execute


def executor(runner):
    """Return the old-style executor defined by ``runner``, falling back to
    ``default_executor`` when it defines no execution hooks."""
    try:
        return runner.execute_example
    except AttributeError:
        pass

    defines_hooks = hasattr(runner, "setup_example") or hasattr(
        runner, "teardown_example"
    )
    if defines_hooks:
        return setup_teardown_executor(
            getattr(runner, "setup_example", None),
            getattr(runner, "teardown_example", None),
        )
    return default_executor


def default_new_style_executor(data, function):
    # New-style executors take (data, function) and run the test body.
    return function(data)


class ConjectureRunner(object):
    def hypothesis_execute_example_with_data(self, data, function):
        return function(data)


def new_style_executor(runner):
    """Adapt ``runner`` into a new-style (data, function) executor."""
    if runner is None:
        return default_new_style_executor
    if isinstance(runner, ConjectureRunner):
        return runner.hypothesis_execute_example_with_data

    legacy = executor(runner)
    if legacy is default_executor:
        # No custom hooks at all: skip the wrapping layer entirely.
        return default_new_style_executor
    # Bridge: old-style executors take a zero-argument callable.
    return lambda data, function: legacy(lambda: function(data))
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/dateutil.py000066400000000000000000000045461354103617500315530ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """ -------------------- hypothesis[dateutil] -------------------- This module provides ``dateutil`` timezones. You can use this strategy to make :py:func:`hypothesis.strategies.datetimes` and :py:func:`hypothesis.strategies.times` produce timezone-aware values. """ from __future__ import absolute_import, division, print_function import datetime as dt from dateutil import tz, zoneinfo # type: ignore import hypothesis._strategies as st __all__ = ["timezones"] def __zone_sort_key(zone): """Sort by absolute UTC offset at reference date, positive first, with ties broken by name. 
""" assert zone is not None offset = zone.utcoffset(dt.datetime(2000, 1, 1)) offset = 999 if offset is None else offset return (abs(offset), -offset, str(zone)) @st.cacheable @st.defines_strategy def timezones(): # type: () -> st.SearchStrategy[dt.tzinfo] """Any timezone in dateutil. This strategy minimises to UTC, or the timezone with the smallest offset from UTC as of 2000-01-01, and is designed for use with :py:func:`~hypothesis.strategies.datetimes`. Note that the timezones generated by the strategy may vary depending on the configuration of your machine. See the dateutil documentation for more information. """ all_timezones = sorted( [tz.gettz(t) for t in zoneinfo.get_zonefile_instance().zones], key=__zone_sort_key, ) all_timezones.insert(0, tz.UTC) # We discard Nones in the list comprehension because Mypy knows that # tz.gettz may return None. However this should never happen for known # zone names, so we assert that it's impossible first. assert None not in all_timezones return st.sampled_from([z for z in all_timezones if z is not None]) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/django/000077500000000000000000000000001354103617500306175ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/django/__init__.py000066400000000000000000000020201354103617500327220ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.extra.django._fields import from_field, register_field_strategy from hypothesis.extra.django._impl import ( TestCase, TransactionTestCase, from_form, from_model, ) __all__ = [ "TestCase", "TransactionTestCase", "from_field", "from_model", "register_field_strategy", "from_form", ] hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/django/_fields.py000066400000000000000000000237721354103617500326110ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import re import string from datetime import timedelta from decimal import Decimal import django import django.db.models as dm import django.forms as df from django.core.validators import ( validate_ipv4_address, validate_ipv6_address, validate_ipv46_address, ) import hypothesis.strategies as st from hypothesis.errors import InvalidArgument from hypothesis.extra.pytz import timezones from hypothesis.internal.validation import check_type from hypothesis.provisional import ip4_addr_strings, ip6_addr_strings, urls from hypothesis.strategies import emails if False: from datetime import tzinfo # noqa from typing import Any, Type, Optional, List, Text, Callable, Union # noqa # Mapping of field types, to strategy objects or functions of (type) -> strategy _global_field_lookup = { dm.SmallIntegerField: st.integers(-32768, 32767), dm.IntegerField: st.integers(-2147483648, 2147483647), dm.BigIntegerField: st.integers(-9223372036854775808, 9223372036854775807), dm.PositiveIntegerField: st.integers(0, 2147483647), dm.PositiveSmallIntegerField: st.integers(0, 32767), dm.BinaryField: st.binary(), dm.BooleanField: st.booleans(), dm.DateField: st.dates(), dm.EmailField: emails(), dm.FloatField: st.floats(), dm.NullBooleanField: st.one_of(st.none(), st.booleans()), dm.URLField: urls(), dm.UUIDField: st.uuids(), df.DateField: st.dates(), df.DurationField: st.timedeltas(), df.EmailField: emails(), df.FloatField: st.floats(allow_nan=False, allow_infinity=False), df.IntegerField: st.integers(-2147483648, 2147483647), df.NullBooleanField: st.one_of(st.none(), st.booleans()), df.URLField: urls(), df.UUIDField: st.uuids(), } def register_for(field_type): def inner(func): _global_field_lookup[field_type] = func return func return inner @register_for(dm.DateTimeField) @register_for(df.DateTimeField) def _for_datetime(field): if getattr(django.conf.settings, "USE_TZ", False): return 
st.datetimes(timezones=timezones()) return st.datetimes() def using_sqlite(): try: return ( getattr(django.conf.settings, "DATABASES", {}) .get("default", {}) .get("ENGINE", "") .endswith(".sqlite3") ) except django.core.exceptions.ImproperlyConfigured: return None @register_for(dm.TimeField) def _for_model_time(field): # SQLITE supports TZ-aware datetimes, but not TZ-aware times. if getattr(django.conf.settings, "USE_TZ", False) and not using_sqlite(): return st.times(timezones=timezones()) return st.times() @register_for(df.TimeField) def _for_form_time(field): if getattr(django.conf.settings, "USE_TZ", False): return st.times(timezones=timezones()) return st.times() @register_for(dm.DurationField) def _for_duration(field): # SQLite stores timedeltas as six bytes of microseconds if using_sqlite(): delta = timedelta(microseconds=2 ** 47 - 1) return st.timedeltas(-delta, delta) return st.timedeltas() @register_for(dm.SlugField) @register_for(df.SlugField) def _for_slug(field): min_size = 1 if getattr(field, "blank", False) or not getattr(field, "required", True): min_size = 0 return st.text( alphabet=string.ascii_letters + string.digits, min_size=min_size, max_size=field.max_length, ) @register_for(dm.GenericIPAddressField) def _for_model_ip(field): return dict( ipv4=ip4_addr_strings(), ipv6=ip6_addr_strings(), both=ip4_addr_strings() | ip6_addr_strings(), )[field.protocol.lower()] @register_for(df.GenericIPAddressField) def _for_form_ip(field): # the IP address form fields have no direct indication of which type # of address they want, so direct comparison with the validator # function has to be used instead. 
Sorry for the potato logic here if validate_ipv46_address in field.default_validators: return ip4_addr_strings() | ip6_addr_strings() if validate_ipv4_address in field.default_validators: return ip4_addr_strings() if validate_ipv6_address in field.default_validators: return ip6_addr_strings() raise InvalidArgument("No IP version validator on field=%r" % field) @register_for(dm.DecimalField) @register_for(df.DecimalField) def _for_decimal(field): bound = Decimal(10 ** field.max_digits - 1) / (10 ** field.decimal_places) return st.decimals(min_value=-bound, max_value=bound, places=field.decimal_places) @register_for(dm.CharField) @register_for(dm.TextField) @register_for(df.CharField) @register_for(df.RegexField) def _for_text(field): # We can infer a vastly more precise strategy by considering the # validators as well as the field type. This is a minimal proof of # concept, but we intend to leverage the idea much more heavily soon. # See https://github.com/HypothesisWorks/hypothesis-python/issues/1116 regexes = [ re.compile(v.regex, v.flags) if isinstance(v.regex, str) else v.regex for v in field.validators if isinstance(v, django.core.validators.RegexValidator) and not v.inverse_match ] if regexes: # This strategy generates according to one of the regexes, and # filters using the others. It can therefore learn to generate # from the most restrictive and filter with permissive patterns. # Not maximally efficient, but it makes pathological cases rarer. # If you want a challenge: extend https://qntm.org/greenery to # compute intersections of the full Python regex language. return st.one_of(*[st.from_regex(r) for r in regexes]) # If there are no (usable) regexes, we use a standard text strategy. 
min_size = 1 if getattr(field, "blank", False) or not getattr(field, "required", True): min_size = 0 strategy = st.text( alphabet=st.characters( blacklist_characters=u"\x00", blacklist_categories=("Cs",) ), min_size=min_size, max_size=field.max_length, ) if getattr(field, "required", True): strategy = strategy.filter(lambda s: s.strip()) return strategy @register_for(df.BooleanField) def _for_form_boolean(field): if field.required: return st.just(True) return st.booleans() def register_field_strategy(field_type, strategy): # type: (Type[dm.Field], st.SearchStrategy) -> None """Add an entry to the global field-to-strategy lookup used by from_field. ``field_type`` must be a subtype of django.db.models.Field, which must not already be registered. ``strategy`` must be a SearchStrategy. """ if not issubclass(field_type, (dm.Field, df.Field)): raise InvalidArgument( "field_type=%r must be a subtype of Field" % (field_type,) ) check_type(st.SearchStrategy, strategy, "strategy") if field_type in _global_field_lookup: raise InvalidArgument( "field_type=%r already has a registered strategy (%r)" % (field_type, _global_field_lookup[field_type]) ) if issubclass(field_type, dm.AutoField): raise InvalidArgument("Cannot register a strategy for an AutoField") _global_field_lookup[field_type] = strategy def from_field(field): # type: (Type[dm.Field]) -> st.SearchStrategy[dm.Field] """Return a strategy for values that fit the given field. This is pretty similar to the core `from_type` function, with a subtle but important difference: `from_field` takes a Field *instance*, rather than a Field *subtype*, so that it has access to instance attributes such as string length and validators. 
""" check_type((dm.Field, df.Field), field, "field") if getattr(field, "choices", False): choices = [] # type: list for value, name_or_optgroup in field.choices: if isinstance(name_or_optgroup, (list, tuple)): choices.extend(key for key, _ in name_or_optgroup) else: choices.append(value) # form fields automatically include an empty choice, strip it out if u"" in choices: choices.remove(u"") min_size = 1 if isinstance(field, (dm.CharField, dm.TextField)) and field.blank: choices.insert(0, u"") elif isinstance(field, (df.Field)) and not field.required: choices.insert(0, u"") min_size = 0 strategy = st.sampled_from(choices) if isinstance(field, (df.MultipleChoiceField, df.TypedMultipleChoiceField)): strategy = st.lists(st.sampled_from(choices), min_size=min_size) else: if type(field) not in _global_field_lookup: if getattr(field, "null", False): return st.none() raise InvalidArgument("Could not infer a strategy for %r", (field,)) strategy = _global_field_lookup[type(field)] if not isinstance(strategy, st.SearchStrategy): strategy = strategy(field) assert isinstance(strategy, st.SearchStrategy) if field.validators: def validate(value): try: field.run_validators(value) return True except django.core.exceptions.ValidationError: return False strategy = strategy.filter(validate) if getattr(field, "null", False): return st.none() | strategy return strategy hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/django/_impl.py000066400000000000000000000201751354103617500322760ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import unittest from functools import partial import django.db.models as dm import django.forms as df import django.test as dt from django.core.exceptions import ValidationError from django.db import IntegrityError import hypothesis._strategies as st from hypothesis import reject from hypothesis.errors import InvalidArgument from hypothesis.extra.django._fields import from_field from hypothesis.utils.conventions import infer if False: from datetime import tzinfo # noqa from typing import Any, Type, Optional, List, Text, Callable, Union # noqa from hypothesis.utils.conventions import InferType # noqa class HypothesisTestCase(object): def setup_example(self): self._pre_setup() def teardown_example(self, example): self._post_teardown() def __call__(self, result=None): testMethod = getattr(self, self._testMethodName) if getattr(testMethod, u"is_hypothesis_test", False): return unittest.TestCase.__call__(self, result) else: return dt.SimpleTestCase.__call__(self, result) class TestCase(HypothesisTestCase, dt.TestCase): pass class TransactionTestCase(HypothesisTestCase, dt.TransactionTestCase): pass @st.defines_strategy def from_model( model, # type: Type[dm.Model] **field_strategies # type: Union[st.SearchStrategy[Any], InferType] ): # type: (...) -> st.SearchStrategy[Any] """Return a strategy for examples of ``model``. .. warning:: Hypothesis creates saved models. This will run inside your testing transaction when using the test runner, but if you use the dev console this will leave debris in your database. ``model`` must be an subclass of :class:`~django:django.db.models.Model`. Strategies for fields may be passed as keyword arguments, for example ``is_staff=st.just(False)``. 
Hypothesis can often infer a strategy based the field type and validators, and will attempt to do so for any required fields. No strategy will be inferred for an :class:`~django:django.db.models.AutoField`, nullable field, foreign key, or field for which a keyword argument is passed to ``from_model()``. For example, a Shop type with a foreign key to Company could be generated with:: shop_strategy = from_model(Shop, company=from_model(Company)) Like for :func:`~hypothesis.strategies.builds`, you can pass :obj:`~hypothesis.infer` as a keyword argument to infer a strategy for a field which has a default value instead of using the default. """ if not issubclass(model, dm.Model): raise InvalidArgument("model=%r must be a subtype of Model" % (model,)) fields_by_name = {f.name: f for f in model._meta.concrete_fields} for name, value in sorted(field_strategies.items()): if value is infer: field_strategies[name] = from_field(fields_by_name[name]) for name, field in sorted(fields_by_name.items()): if ( name not in field_strategies and not field.auto_created and field.default is dm.fields.NOT_PROVIDED ): field_strategies[name] = from_field(field) for field in field_strategies: if model._meta.get_field(field).primary_key: # The primary key is generated as part of the strategy. We # want to find any existing row with this primary key and # overwrite its contents. kwargs = {field: field_strategies.pop(field)} kwargs["defaults"] = st.fixed_dictionaries(field_strategies) # type: ignore return _models_impl(st.builds(model.objects.update_or_create, **kwargs)) # The primary key is not generated as part of the strategy, so we # just match against any row that has the same value for all # fields. 
return _models_impl(st.builds(model.objects.get_or_create, **field_strategies)) @st.composite def _models_impl(draw, strat): """Handle the nasty part of drawing a value for models()""" try: return draw(strat)[0] except IntegrityError: reject() @st.defines_strategy def from_form( form, # type: Type[dm.Model] form_kwargs=None, # type: dict **field_strategies # type: Union[st.SearchStrategy[Any], InferType] ): # type: (...) -> st.SearchStrategy[Any] """Return a strategy for examples of ``form``. ``form`` must be an subclass of :class:`~django:django.forms.Form`. Strategies for fields may be passed as keyword arguments, for example ``is_staff=st.just(False)``. Hypothesis can often infer a strategy based the field type and validators, and will attempt to do so for any required fields. No strategy will be inferred for a disabled field or field for which a keyword argument is passed to ``from_form()``. This function uses the fields of an unbound ``form`` instance to determine field strategies, any keyword arguments needed to instantiate the unbound ``form`` instance can be passed into ``from_form()`` as a dict with the keyword ``form_kwargs``. E.g.:: shop_strategy = from_form(Shop, form_kwargs={"company_id": 5}) Like for :func:`~hypothesis.strategies.builds`, you can pass :obj:`~hypothesis.infer` as a keyword argument to infer a strategy for a field which has a default value instead of using the default. """ # currently unsupported: # ComboField # FilePathField # FileField # ImageField form_kwargs = form_kwargs or {} if not issubclass(form, df.BaseForm): raise InvalidArgument("form=%r must be a subtype of Form" % (form,)) # Forms are a little bit different from models. Model classes have # all their fields defined, whereas forms may have different fields # per-instance. 
So, we ought to instantiate the form and get the # fields from the instance, thus we need to accept the kwargs for # instantiation as well as the explicitly defined strategies unbound_form = form(**form_kwargs) fields_by_name = {} for name, field in unbound_form.fields.items(): if isinstance(field, df.MultiValueField): # PS: So this is a little strange, but MultiValueFields must # have their form data encoded in a particular way for the # values to actually be picked up by the widget instances' # ``value_from_datadict``. # E.g. if a MultiValueField named 'mv_field' has 3 # sub-fields then the ``value_from_datadict`` will look for # 'mv_field_0', 'mv_field_1', and 'mv_field_2'. Here I'm # decomposing the individual sub-fields into the names that # the form validation process expects for i, _field in enumerate(field.fields): fields_by_name["%s_%d" % (name, i)] = _field else: fields_by_name[name] = field for name, value in sorted(field_strategies.items()): if value is infer: field_strategies[name] = from_field(fields_by_name[name]) for name, field in sorted(fields_by_name.items()): if name not in field_strategies and not field.disabled: field_strategies[name] = from_field(field) return _forms_impl( st.builds( partial(form, **form_kwargs), data=st.fixed_dictionaries(field_strategies), # type: ignore ) ) @st.composite def _forms_impl(draw, strat): """Handle the nasty part of drawing a value for from_form()""" try: return draw(strat) except ValidationError: reject() hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/django/models.py000066400000000000000000000110031354103617500324470ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import django.db.models as dm from django.db import IntegrityError import hypothesis._strategies as st from hypothesis import reject from hypothesis._settings import note_deprecation from hypothesis.errors import InvalidArgument from hypothesis.extra.django import from_field, register_field_strategy from hypothesis.utils.conventions import DefaultValueType if False: from typing import Any, Type, List, Text, Union # noqa def add_default_field_mapping(field_type, strategy): # type: (Type[dm.Field], st.SearchStrategy[Any]) -> None note_deprecation( "`hypothesis.extra.django.models.add_default_field_mapping` is deprecated; use `hypothesis.extra.django." "register_field_strategy` instead.", since="2019-01-10", ) register_field_strategy(field_type, strategy) default_value = DefaultValueType(u"default_value") @st.defines_strategy def models( model, # type: Type[dm.Model] **field_strategies # type: Union[st.SearchStrategy[Any], DefaultValueType] ): # type: (...) -> st.SearchStrategy[Any] """Return a strategy for examples of ``model``. .. warning:: Hypothesis creates saved models. This will run inside your testing transaction when using the test runner, but if you use the dev console this will leave debris in your database. ``model`` must be an subclass of :class:`~django:django.db.models.Model`. Strategies for fields may be passed as keyword arguments, for example ``is_staff=st.just(False)``. 
Hypothesis can often infer a strategy based the field type and validators - for best results, make sure your validators are derived from Django's and therefore have the known types and attributes. Passing a keyword argument skips inference for that field; pass a strategy or pass ``hypothesis.extra.django.models.default_value`` to skip inference for that field. Foreign keys are not automatically derived. If they're nullable they will default to always being null, otherwise you always have to specify them. For example, examples of a Shop type with a foreign key to Company could be generated with:: shop_strategy = models(Shop, company=models(Company)) """ note_deprecation( "`hypothesis.extra.django.models.models` is deprecated; use `hypothesis.extra.django." "from_model` instead.", since="2019-01-10", ) result = {} for k, v in field_strategies.items(): if not isinstance(v, DefaultValueType): result[k] = v missed = [] # type: List[Text] for f in model._meta.concrete_fields: if not (f.name in field_strategies or isinstance(f, dm.AutoField)): result[f.name] = from_field(f) if result[f.name].is_empty: missed.append(f.name) if missed: raise InvalidArgument( u"Missing arguments for mandatory field%s %s for model %s" % (u"s" if len(missed) > 1 else u"", u", ".join(missed), model.__name__) ) for field in result: if model._meta.get_field(field).primary_key: # The primary key is generated as part of the strategy. We # want to find any existing row with this primary key and # overwrite its contents. kwargs = {field: result.pop(field)} kwargs["defaults"] = st.fixed_dictionaries(result) return _models_impl(st.builds(model.objects.update_or_create, **kwargs)) # The primary key is not generated as part of the strategy, so we # just match against any row that has the same value for all # fields. 
return _models_impl(st.builds(model.objects.get_or_create, **result)) @st.composite def _models_impl(draw, strat): """Handle the nasty part of drawing a value for models()""" try: return draw(strat)[0] except IntegrityError: reject() hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/dpcontracts.py000066400000000000000000000041061354103617500322540ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """ ----------------------- hypothesis[dpcontracts] ----------------------- This module provides tools for working with the :pypi:`dpcontracts` library, because `combining contracts and property-based testing works really well `_. It requires ``dpcontracts >= 0.4``. """ from __future__ import absolute_import, division, print_function from dpcontracts import PreconditionError from hypothesis import reject from hypothesis.errors import InvalidArgument from hypothesis.internal.reflection import proxies def fulfill(contract_func): """Decorate ``contract_func`` to reject calls which violate preconditions, and retry them with different arguments. This is a convenience function for testing internal code that uses :pypi:dpcontracts`, to automatically filter out arguments that would be rejected by the public interface before triggering a contract error. This can be used as ``builds(fulfill(func), ...)`` or in the body of the test e.g. 
``assert fulfill(func)(*args)``. """ if not hasattr(contract_func, "__contract_wrapped_func__"): raise InvalidArgument( "There are no dpcontracts preconditions associated with %s" % (contract_func.__name__,) ) @proxies(contract_func) def inner(*args, **kwargs): try: return contract_func(*args, **kwargs) except PreconditionError: reject() return inner hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/lark.py000066400000000000000000000214161354103617500306640ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """ ---------------- hypothesis[lark] ---------------- This extra can be used to generate strings matching any context-free grammar, using the `Lark parser library `_. It currently only supports Lark's native EBNF syntax, but we plan to extend this to support other common syntaxes such as ANTLR and :rfc:`5234` ABNF. Lark already `supports loading grammars `_ from `nearley.js `_, so you may not have to write your own at all. Note that as Lark is at version 0.x, this module *may* break API compatibility in minor releases if supporting the latest version of Lark would otherwise be infeasible. We may also be quite aggressive in bumping the minimum version of Lark, unless someone volunteers to either fund or do the maintainence. 
""" from __future__ import absolute_import, division, print_function import attr import lark from lark.grammar import NonTerminal, Terminal import hypothesis._strategies as st from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import getfullargspec, string_types from hypothesis.internal.conjecture.utils import calc_label_from_name from hypothesis.internal.validation import check_type from hypothesis.searchstrategy import SearchStrategy if False: from typing import Dict, Text # noqa __all__ = ["from_lark"] @attr.s() class DrawState(object): """Tracks state of a single draw from a lark grammar. Currently just wraps a list of tokens that will be emitted at the end, but as we support more sophisticated parsers this will need to track more state for e.g. indentation level. """ # The text output so far as a list of string tokens resulting from # each draw to a non-terminal. result = attr.ib(default=attr.Factory(list)) def get_terminal_names(terminals, rules, ignore_names): """Get names of all terminals in the grammar. The arguments are the results of calling ``Lark.grammar.compile()``, so you would think that the ``terminals`` and ``ignore_names`` would have it all... but they omit terminals created with ``@declare``, which appear only in the expansion(s) of nonterminals. """ names = {t.name for t in terminals} | set(ignore_names) for rule in rules: names |= {t.name for t in rule.expansion if isinstance(t, Terminal)} return names class LarkStrategy(SearchStrategy): """Low-level strategy implementation wrapping a Lark grammar. See ``from_lark`` for details. 
""" def __init__(self, grammar, start, explicit): assert isinstance(grammar, lark.lark.Lark) if start is None: start = grammar.options.start if not isinstance(start, list): start = [start] self.grammar = grammar if "start" in getfullargspec(grammar.grammar.compile).args: terminals, rules, ignore_names = grammar.grammar.compile(start) else: # pragma: no cover # This branch is to support lark <= 0.7.1, without the start argument. terminals, rules, ignore_names = grammar.grammar.compile() self.names_to_symbols = {} for r in rules: t = r.origin self.names_to_symbols[t.name] = t for t in terminals: self.names_to_symbols[t.name] = Terminal(t.name) self.start = st.sampled_from([self.names_to_symbols[s] for s in start]) self.ignored_symbols = ( st.sampled_from([self.names_to_symbols[n] for n in ignore_names]) if ignore_names else st.nothing() ) self.terminal_strategies = { t.name: st.from_regex(t.pattern.to_regexp(), fullmatch=True) for t in terminals } unknown_explicit = set(explicit) - get_terminal_names( terminals, rules, ignore_names ) if unknown_explicit: raise InvalidArgument( "The following arguments were passed as explicit_strategies, " "but there is no such terminal production in this grammar: %r" % (sorted(unknown_explicit),) ) self.terminal_strategies.update(explicit) nonterminals = {} for rule in rules: nonterminals.setdefault(rule.origin.name, []).append(tuple(rule.expansion)) for v in nonterminals.values(): v.sort(key=len) self.nonterminal_strategies = { k: st.sampled_from(v) for k, v in nonterminals.items() } self.__rule_labels = {} def do_draw(self, data): state = DrawState() start = data.draw(self.start) self.draw_symbol(data, start, state) return u"".join(state.result) def rule_label(self, name): try: return self.__rule_labels[name] except KeyError: return self.__rule_labels.setdefault( name, calc_label_from_name("LARK:%s" % (name,)) ) def draw_symbol(self, data, symbol, draw_state): if isinstance(symbol, Terminal): try: strategy = 
self.terminal_strategies[symbol.name] except KeyError: raise InvalidArgument( "Undefined terminal %r. Generation does not currently support " "use of %%declare unless you pass `explicit`, a dict of " 'names-to-strategies, such as `{%r: st.just("")}`' % (symbol.name, symbol.name) ) draw_state.result.append(data.draw(strategy)) else: assert isinstance(symbol, NonTerminal) data.start_example(self.rule_label(symbol.name)) expansion = data.draw(self.nonterminal_strategies[symbol.name]) for e in expansion: self.draw_symbol(data, e, draw_state) self.gen_ignore(data, draw_state) data.stop_example() def gen_ignore(self, data, draw_state): if self.ignored_symbols.is_empty: return if data.draw_bits(2) == 3: emit = data.draw(self.ignored_symbols) self.draw_symbol(data, emit, draw_state) def calc_has_reusable_values(self, recur): return True def check_explicit(name): def inner(value): check_type(string_types, value, "value drawn from " + name) return value return inner @st.cacheable @st.defines_strategy_with_reusable_values def from_lark( grammar, # type: lark.lark.Lark start=None, # type: Text explicit=None, # type: Dict[Text, st.SearchStrategy[Text]] ): # type: (...) -> st.SearchStrategy[Text] """A strategy for strings accepted by the given context-free grammar. ``grammar`` must be a ``Lark`` object, which wraps an EBNF specification. The Lark EBNF grammar reference can be found `here `_. ``from_lark`` will automatically generate strings matching the nonterminal ``start`` symbol in the grammar, which was supplied as an argument to the Lark class. To generate strings matching a different symbol, including terminals, you can override this by passing the ``start`` argument to ``from_lark``. Note that Lark may remove unreachable productions when the grammar is compiled, so you should probably pass the same value for ``start`` to both. Currently ``from_lark`` does not support grammars that need custom lexing. 
Any lexers will be ignored, and any undefined terminals from the use of ``%declare`` will result in generation errors. To define strategies for such terminals, pass a dictionary mapping their name to a corresponding strategy as the ``explicit`` argument. The :pypi:`hypothesmith` project includes a strategy for Python source, based on a grammar and careful post-processing. """ check_type(lark.lark.Lark, grammar, "grammar") if explicit is None: explicit = {} else: check_type(dict, explicit, "explicit") explicit = { k: v.map(check_explicit("explicit[%r]=%r" % (k, v))) for k, v in explicit.items() } return LarkStrategy(grammar, start, explicit) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/numpy.py000066400000000000000000001067541354103617500311140ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import math import numpy as np import hypothesis._strategies as st import hypothesis.internal.conjecture.utils as cu from hypothesis import Verbosity from hypothesis._settings import note_deprecation from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import PY2, hrange, integer_types from hypothesis.internal.coverage import check_function from hypothesis.internal.reflection import proxies from hypothesis.internal.validation import check_type, check_valid_interval from hypothesis.reporting import current_verbosity from hypothesis.searchstrategy import SearchStrategy if False: from typing import Any, Union, Sequence, Tuple, Optional # noqa from hypothesis.searchstrategy.strategies import T # noqa Shape = Tuple[int, ...] # noqa TIME_RESOLUTIONS = tuple("Y M D h m s ms us ns ps fs as".split()) @st.defines_strategy_with_reusable_values def from_dtype(dtype): # type: (np.dtype) -> st.SearchStrategy[Any] """Creates a strategy which can generate any value of the given dtype.""" check_type(np.dtype, dtype, "dtype") # Compound datatypes, eg 'f4,f4,f4' if dtype.names is not None: # mapping np.void.type over a strategy is nonsense, so return now. return st.tuples(*[from_dtype(dtype.fields[name][0]) for name in dtype.names]) # Subarray datatypes, eg '(2, 3)i4' if dtype.subdtype is not None: subtype, shape = dtype.subdtype return arrays(subtype, shape) # Scalar datatypes if dtype.kind == u"b": result = st.booleans() # type: SearchStrategy[Any] elif dtype.kind == u"f": if dtype.itemsize == 2: result = st.floats(width=16) elif dtype.itemsize == 4: result = st.floats(width=32) else: result = st.floats() elif dtype.kind == u"c": if dtype.itemsize == 8: float32 = st.floats(width=32) result = st.builds(complex, float32, float32) else: result = st.complex_numbers() elif dtype.kind in (u"S", u"a"): # Numpy strings are null-terminated; only allow round-trippable values. 
# `itemsize == 0` means 'fixed length determined at array creation' result = st.binary(max_size=dtype.itemsize or None).filter( lambda b: b[-1:] != b"\0" ) elif dtype.kind == u"u": result = st.integers(min_value=0, max_value=2 ** (8 * dtype.itemsize) - 1) elif dtype.kind == u"i": overflow = 2 ** (8 * dtype.itemsize - 1) result = st.integers(min_value=-overflow, max_value=overflow - 1) elif dtype.kind == u"U": # Encoded in UTF-32 (four bytes/codepoint) and null-terminated result = st.text(max_size=(dtype.itemsize or 0) // 4 or None).filter( lambda b: b[-1:] != u"\0" ) elif dtype.kind in (u"m", u"M"): if "[" in dtype.str: res = st.just(dtype.str.split("[")[-1][:-1]) else: res = st.sampled_from(TIME_RESOLUTIONS) result = st.builds(dtype.type, st.integers(-2 ** 63, 2 ** 63 - 1), res) else: raise InvalidArgument(u"No strategy inference for {}".format(dtype)) return result.map(dtype.type) @check_function def check_argument(condition, fail_message, *f_args, **f_kwargs): if not condition: raise InvalidArgument(fail_message.format(*f_args, **f_kwargs)) @check_function def order_check(name, floor, small, large): check_argument( floor <= small, u"min_{name} must be at least {} but was {}", floor, small, name=name, ) check_argument( small <= large, u"min_{name}={} is larger than max_{name}={}", small, large, name=name, ) class ArrayStrategy(SearchStrategy): def __init__(self, element_strategy, shape, dtype, fill, unique): self.shape = tuple(shape) self.fill = fill self.array_size = int(np.prod(shape)) self.dtype = dtype self.element_strategy = element_strategy self.unique = unique def set_element(self, data, result, idx, strategy=None): strategy = strategy or self.element_strategy val = data.draw(strategy) result[idx] = val if self._report_overflow and val != result[idx] and val == val: note_deprecation( "Generated array element %r from %r cannot be represented as " "dtype %r - instead it becomes %r (type %r). 
Consider using a more " "precise strategy, for example passing the `width` argument to " "`floats()`, as this will be an error in a future version." % (val, strategy, self.dtype, result[idx], type(result[idx])), since="2019-07-28", ) # Because the message includes the value of the generated element, # it would be easy to spam users with thousands of warnings. # We therefore only warn once per draw, unless in verbose mode. self._report_overflow = current_verbosity() >= Verbosity.verbose def do_draw(self, data): if 0 in self.shape: return np.zeros(dtype=self.dtype, shape=self.shape) # Reset this flag for each test case to emit warnings from set_element # Skip the check for object or void (multi-element) dtypes self._report_overflow = self.dtype.kind not in ("O", "V") # This could legitimately be a np.empty, but the performance gains for # that would be so marginal that there's really not much point risking # undefined behaviour shenanigans. result = np.zeros(shape=self.array_size, dtype=self.dtype) if self.fill.is_empty: # We have no fill value (either because the user explicitly # disabled it or because the default behaviour was used and our # elements strategy does not produce reusable values), so we must # generate a fully dense array with a freshly drawn value for each # entry. if self.unique: seen = set() elements = cu.many( data, min_size=self.array_size, max_size=self.array_size, average_size=self.array_size, ) i = 0 while elements.more(): # We assign first because this means we check for # uniqueness after numpy has converted it to the relevant # type for us. Because we don't increment the counter on # a duplicate we will overwrite it on the next draw. self.set_element(data, result, i) if result[i] not in seen: seen.add(result[i]) i += 1 else: elements.reject() else: for i in hrange(len(result)): self.set_element(data, result, i) else: # We draw numpy arrays as "sparse with an offset". 
We draw a # collection of index assignments within the array and assign # fresh values from our elements strategy to those indices. If at # the end we have not assigned every element then we draw a single # value from our fill strategy and use that to populate the # remaining positions with that strategy. elements = cu.many( data, min_size=0, max_size=self.array_size, # sqrt isn't chosen for any particularly principled reason. It # just grows reasonably quickly but sublinearly, and for small # arrays it represents a decent fraction of the array size. average_size=math.sqrt(self.array_size), ) needs_fill = np.full(self.array_size, True) seen = set() while elements.more(): i = cu.integer_range(data, 0, self.array_size - 1) if not needs_fill[i]: elements.reject() continue self.set_element(data, result, i) if self.unique: if result[i] in seen: elements.reject() continue else: seen.add(result[i]) needs_fill[i] = False if needs_fill.any(): # We didn't fill all of the indices in the early loop, so we # put a fill value into the rest. # We have to do this hilarious little song and dance to work # around numpy's special handling of iterable values. If the # value here were e.g. a tuple then neither array creation # nor putmask would do the right thing. But by creating an # array of size one and then assigning the fill value as a # single element, we both get an array with the right value in # it and putmask will do the right thing by repeating the # values of the array across the mask. 
one_element = np.zeros(shape=1, dtype=self.dtype) self.set_element(data, one_element, 0, self.fill) fill_value = one_element[0] if self.unique: try: is_nan = np.isnan(fill_value) except TypeError: is_nan = False if not is_nan: raise InvalidArgument( "Cannot fill unique array with non-NaN " "value %r" % (fill_value,) ) np.putmask(result, needs_fill, one_element) return result.reshape(self.shape) @check_function def fill_for(elements, unique, fill, name=""): if fill is None: if unique or not elements.has_reusable_values: fill = st.nothing() else: fill = elements else: st.check_strategy(fill, "%s.fill" % (name,) if name else "fill") return fill @st.defines_strategy def arrays( dtype, # type: Any shape, # type: Union[int, Shape, st.SearchStrategy[Shape]] elements=None, # type: st.SearchStrategy[Any] fill=None, # type: st.SearchStrategy[Any] unique=False, # type: bool ): # type: (...) -> st.SearchStrategy[np.ndarray] r"""Returns a strategy for generating :class:`numpy:numpy.ndarray`\ s. * ``dtype`` may be any valid input to :class:`~numpy:numpy.dtype` (this includes :class:`~numpy:numpy.dtype` objects), or a strategy that generates such values. * ``shape`` may be an integer >= 0, a tuple of such integers, or a strategy that generates such values. * ``elements`` is a strategy for generating values to put in the array. If it is None a suitable value will be inferred based on the dtype, which may give any legal value (including eg ``NaN`` for floats). If you have more specific requirements, you should supply your own elements strategy. * ``fill`` is a strategy that may be used to generate a single background value for the array. If None, a suitable default will be inferred based on the other arguments. If set to :func:`~hypothesis.strategies.nothing` then filling behaviour will be disabled entirely and every element will be generated independently. * ``unique`` specifies if the elements of the array should all be distinct from one another. 
Note that in this case multiple NaN values may still be allowed. If fill is also set, the only valid values for it to return are NaN values (anything for which :obj:`numpy:numpy.isnan` returns True. So e.g. for complex numbers (nan+1j) is also a valid fill). Note that if unique is set to True the generated values must be hashable. Arrays of specified ``dtype`` and ``shape`` are generated for example like this: .. code-block:: pycon >>> import numpy as np >>> arrays(np.int8, (2, 3)).example() array([[-8, 6, 3], [-6, 4, 6]], dtype=int8) - See :doc:`What you can generate and how `. .. code-block:: pycon >>> import numpy as np >>> from hypothesis.strategies import floats >>> arrays(np.float, 3, elements=floats(0, 1)).example() array([ 0.88974794, 0.77387938, 0.1977879 ]) Array values are generated in two parts: 1. Some subset of the coordinates of the array are populated with a value drawn from the elements strategy (or its inferred form). 2. If any coordinates were not assigned in the previous step, a single value is drawn from the fill strategy and is assigned to all remaining places. You can set fill to :func:`~hypothesis.strategies.nothing` if you want to disable this behaviour and draw a value for every element. If fill is set to None then it will attempt to infer the correct behaviour automatically: If unique is True, no filling will occur by default. Otherwise, if it looks safe to reuse the values of elements across multiple coordinates (this will be the case for any inferred strategy, and for most of the builtins, but is not the case for mutable values or strategies built with flatmap, map, composite, etc) then it will use the elements strategy as the fill, else it will default to having no fill. Having a fill helps Hypothesis craft high quality examples, but its main importance is when the array generated is large: Hypothesis is primarily designed around testing small examples. 
If you have arrays with hundreds or more elements, having a fill value is essential if you want your tests to run in reasonable time. """ # We support passing strategies as arguments for convenience, or at least # for legacy reasons, but don't want to pay the perf cost of a composite # strategy (i.e. repeated argument handling and validation) when it's not # needed. So we get the best of both worlds by recursing with flatmap, # but only when it's actually needed. if isinstance(dtype, SearchStrategy): return dtype.flatmap( lambda d: arrays(d, shape, elements=elements, fill=fill, unique=unique) ) if isinstance(shape, SearchStrategy): return shape.flatmap( lambda s: arrays(dtype, s, elements=elements, fill=fill, unique=unique) ) # From here on, we're only dealing with values and it's relatively simple. dtype = np.dtype(dtype) if elements is None: elements = from_dtype(dtype) if isinstance(shape, integer_types): shape = (shape,) shape = tuple(shape) check_argument( all(isinstance(s, integer_types) for s in shape), "Array shape must be integer in each dimension, provided shape was {}", shape, ) fill = fill_for(elements=elements, unique=unique, fill=fill) return ArrayStrategy(elements, shape, dtype, fill, unique) @st.defines_strategy def array_shapes(min_dims=1, max_dims=None, min_side=1, max_side=None): # type: (int, int, int, int) -> st.SearchStrategy[Shape] """Return a strategy for array shapes (tuples of int >= 1).""" check_type(integer_types, min_dims, "min_dims") check_type(integer_types, min_side, "min_side") if min_dims > 32: raise InvalidArgument( "Got min_dims=%r, but numpy does not support arrays greater than 32 dimensions" % min_dims ) if max_dims is None: max_dims = min(min_dims + 2, 32) check_type(integer_types, max_dims, "max_dims") if max_dims > 32: raise InvalidArgument( "Got max_dims=%r, but numpy does not support arrays greater than 32 dimensions" % max_dims ) if max_side is None: max_side = min_side + 5 check_type(integer_types, max_side, "max_side") 
# --- tail of array_shapes(): definition starts on an earlier chunk ---
    order_check("dims", 0, min_dims, max_dims)
    order_check("side", 0, min_side, max_side)
    return st.lists(
        st.integers(min_side, max_side), min_size=min_dims, max_size=max_dims
    ).map(tuple)


@st.defines_strategy
def scalar_dtypes():
    # type: () -> st.SearchStrategy[np.dtype]
    """Return a strategy that can return any non-flexible scalar dtype."""
    # Union of every simple dtype family this module can generate.
    return st.one_of(
        boolean_dtypes(),
        integer_dtypes(),
        unsigned_integer_dtypes(),
        floating_dtypes(),
        complex_number_dtypes(),
        datetime64_dtypes(),
        timedelta64_dtypes(),
    )


def defines_dtype_strategy(strat):
    # type: (T) -> T
    # Decorator: wraps a strategy-returning function so every drawn value is
    # passed through np.dtype(), printing diagnostics if conversion fails.
    @st.defines_strategy
    @proxies(strat)
    def inner(*args, **kwargs):
        strategy = strat(*args, **kwargs)

        def convert_to_dtype(x):
            """Helper to debug issue #1798."""
            try:
                return np.dtype(x)
            except ValueError:
                # Report which strategy/function produced the invalid value
                # before re-raising, to make bug reports actionable.
                print(
                    "Got invalid dtype value=%r from strategy=%r, function=%r"
                    % (x, strategy, strat)
                )
                raise

        return strategy.map(convert_to_dtype)

    return inner


@defines_dtype_strategy
def boolean_dtypes():
    # type: () -> st.SearchStrategy[np.dtype]
    # "?" is numpy's character code for bool.
    return st.just("?")


def dtype_factory(kind, sizes, valid_sizes, endianness):
    # Utility function, shared logic for most integer and string types.
    # `kind` is a numpy dtype code template (e.g. "u", "datetime64[{}]");
    # `sizes` are bit widths (or codes), validated against `valid_sizes`.
    valid_endian = ("?", "<", "=", ">")
    check_argument(
        endianness in valid_endian,
        u"Unknown endianness: was {}, must be in {}",
        endianness,
        valid_endian,
    )
    if valid_sizes is not None:
        if isinstance(sizes, int):
            sizes = (sizes,)
        check_argument(sizes, "Dtype must have at least one possible size.")
        check_argument(
            all(s in valid_sizes for s in sizes),
            u"Invalid sizes: was {} must be an item or sequence " u"in {}",
            sizes,
            valid_sizes,
        )
        # Sizes are given in bits; numpy dtype codes count bytes.
        if all(isinstance(s, int) for s in sizes):
            sizes = sorted({s // 8 for s in sizes})
    strat = st.sampled_from(sizes)
    if "{}" not in kind:
        kind += "{}"
    if endianness == "?":
        # Either byte order is allowed: pick "<" or ">" around the same size.
        return strat.map(("<" + kind).format) | strat.map((">" + kind).format)
    return strat.map((endianness + kind).format)


@defines_dtype_strategy
def unsigned_integer_dtypes(endianness="?", sizes=(8, 16, 32, 64)):
    # type: (str, Sequence[int]) ->
# --- continuation of the unsigned_integer_dtypes type comment cut above ---
st.SearchStrategy[np.dtype]
    """Return a strategy for unsigned integer dtypes.

    endianness may be ``<`` for little-endian, ``>`` for big-endian, ``=`` for
    native byte order, or ``?`` to allow either byte order. This argument only
    applies to dtypes of more than one byte.

    sizes must be a collection of integer sizes in bits. The default
    (8, 16, 32, 64) covers the full range of sizes.
    """
    return dtype_factory("u", sizes, (8, 16, 32, 64), endianness)


@defines_dtype_strategy
def integer_dtypes(endianness="?", sizes=(8, 16, 32, 64)):
    # type: (str, Sequence[int]) -> st.SearchStrategy[np.dtype]
    """Return a strategy for signed integer dtypes.

    endianness and sizes are treated as for
    :func:`unsigned_integer_dtypes`.
    """
    return dtype_factory("i", sizes, (8, 16, 32, 64), endianness)


@defines_dtype_strategy
def floating_dtypes(endianness="?", sizes=(16, 32, 64)):
    # type: (str, Sequence[int]) -> st.SearchStrategy[np.dtype]
    """Return a strategy for floating-point dtypes.

    sizes is the size in bits of floating-point number. Some machines support
    96- or 128-bit floats, but these are not generated by default.

    Larger floats (96 and 128 bit real parts) are not supported on all
    platforms and therefore disabled by default. To generate these dtypes,
    include these values in the sizes argument.
    """
    return dtype_factory("f", sizes, (16, 32, 64, 96, 128), endianness)


@defines_dtype_strategy
def complex_number_dtypes(endianness="?", sizes=(64, 128)):
    # type: (str, Sequence[int]) -> st.SearchStrategy[np.dtype]
    """Return a strategy for complex-number dtypes.

    sizes is the total size in bits of a complex number, which consists
    of two floats.

    Complex halfs (a 16-bit real part) are not supported by numpy and
    will not be generated by this strategy.
# --- closes the complex_number_dtypes docstring opened on the previous chunk ---
    """
    return dtype_factory("c", sizes, (64, 128, 192, 256), endianness)


@check_function
def validate_time_slice(max_period, min_period):
    # Validate both bounds against the known resolutions, then return the
    # inclusive slice of TIME_RESOLUTIONS from max_period down to min_period.
    check_argument(
        max_period in TIME_RESOLUTIONS,
        u"max_period {} must be a valid resolution in {}",
        max_period,
        TIME_RESOLUTIONS,
    )
    check_argument(
        min_period in TIME_RESOLUTIONS,
        u"min_period {} must be a valid resolution in {}",
        min_period,
        TIME_RESOLUTIONS,
    )
    start = TIME_RESOLUTIONS.index(max_period)
    end = TIME_RESOLUTIONS.index(min_period) + 1
    # max_period must come strictly before min_period in the sequence for
    # the slice to be non-empty.
    check_argument(
        start < end,
        u"max_period {} must be earlier in sequence {} than " u"min_period {}",
        max_period,
        TIME_RESOLUTIONS,
        min_period,
    )
    return TIME_RESOLUTIONS[start:end]


@defines_dtype_strategy
def datetime64_dtypes(max_period="Y", min_period="ns", endianness="?"):
    # type: (str, str, str) -> st.SearchStrategy[np.dtype]
    """Return a strategy for datetime64 dtypes, with various precisions from
    year to attosecond."""
    return dtype_factory(
        "datetime64[{}]",
        validate_time_slice(max_period, min_period),
        TIME_RESOLUTIONS,
        endianness,
    )


@defines_dtype_strategy
def timedelta64_dtypes(max_period="Y", min_period="ns", endianness="?"):
    # type: (str, str, str) -> st.SearchStrategy[np.dtype]
    """Return a strategy for timedelta64 dtypes, with various precisions from
    year to attosecond."""
    return dtype_factory(
        "timedelta64[{}]",
        validate_time_slice(max_period, min_period),
        TIME_RESOLUTIONS,
        endianness,
    )


@defines_dtype_strategy
def byte_string_dtypes(endianness="?", min_len=1, max_len=16):
    # type: (str, int, int) -> st.SearchStrategy[np.dtype]
    """Return a strategy for generating bytestring dtypes, of various lengths
    and byteorder.

    While Hypothesis' string strategies can generate empty strings, string
    dtypes with length 0 indicate that size is still to be determined, so
    the minimum length for string dtypes is 1.
    """
    if min_len == 0:
        note_deprecation(
            "generating byte string dtypes for unspecified length ('S0') "
            "is deprecated. 
# --- continuation of byte_string_dtypes: the deprecation string is cut above ---
min_len will be 1 instead.",
            since="2019-09-09",
        )
        min_len = 1
    if max_len == 0:
        note_deprecation(
            "generating byte string dtypes for unspecified length ('S0') "
            "is deprecated. max_len will be 1 instead.",
            since="2019-09-09",
        )
        max_len = 1
    order_check("len", 1, min_len, max_len)
    # valid_sizes=None: string dtypes take an explicit length, not a byte size.
    return dtype_factory("S", list(range(min_len, max_len + 1)), None, endianness)


@defines_dtype_strategy
def unicode_string_dtypes(endianness="?", min_len=1, max_len=16):
    # type: (str, int, int) -> st.SearchStrategy[np.dtype]
    """Return a strategy for generating unicode string dtypes, of various
    lengths and byteorder.

    While Hypothesis' string strategies can generate empty strings, string
    dtypes with length 0 indicate that size is still to be determined, so
    the minimum length for string dtypes is 1.
    """
    if min_len == 0:
        note_deprecation(
            "generating unicode string dtypes for unspecified length ('U0') "
            "is deprecated. min_len will be 1 instead.",
            since="2019-09-09",
        )
        min_len = 1
    if max_len == 0:
        note_deprecation(
            "generating unicode string dtypes for unspecified length ('U0') "
            "is deprecated. max_len will be 1 instead.",
            since="2019-09-09",
        )
        max_len = 1
    order_check("len", 1, min_len, max_len)
    return dtype_factory("U", list(range(min_len, max_len + 1)), None, endianness)


@defines_dtype_strategy
def array_dtypes(
    subtype_strategy=scalar_dtypes(),  # type: st.SearchStrategy[np.dtype]
    min_size=1,  # type: int
    max_size=5,  # type: int
    allow_subarrays=False,  # type: bool
):
    # type: (...) -> st.SearchStrategy[np.dtype]
    """Return a strategy for generating array (compound) dtypes, with members
    drawn from the given subtype strategy."""
    order_check("size", 0, min_size, max_size)
    # Field names must be native strings and the empty string is weird; see #1963.
# --- tail of array_dtypes(): build the field tuples and collect them ---
    if PY2:
        field_names = st.binary(min_size=1)
    else:
        field_names = st.text(min_size=1)
    elements = st.tuples(field_names, subtype_strategy)
    if allow_subarrays:
        # Optionally allow (name, dtype, shape) entries, i.e. subarray fields.
        elements |= st.tuples(
            field_names, subtype_strategy, array_shapes(max_dims=2, max_side=2)
        )
    # unique_by on the name slot prevents duplicate field names.
    return st.lists(
        elements=elements,
        min_size=min_size,
        max_size=max_size,
        unique_by=lambda d: d[0],
    )


@st.defines_strategy
def nested_dtypes(
    subtype_strategy=scalar_dtypes(),  # type: st.SearchStrategy[np.dtype]
    max_leaves=10,  # type: int
    max_itemsize=None,  # type: int
):
    # type: (...) -> st.SearchStrategy[np.dtype]
    """Return the most-general dtype strategy.

    Elements drawn from this strategy may be simple (from the
    subtype_strategy), or several such values drawn from :func:`array_dtypes`
    with ``allow_subarrays=True``. Subdtypes in an array dtype may be nested
    to any depth, subject to the max_leaves argument.
    """
    return st.recursive(
        subtype_strategy, lambda x: array_dtypes(x, allow_subarrays=True), max_leaves
    ).filter(lambda d: max_itemsize is None or d.itemsize <= max_itemsize)


@st.defines_strategy
def valid_tuple_axes(ndim, min_size=0, max_size=None):
    # type: (int, int, int) -> st.SearchStrategy[Shape]
    """Return a strategy for generating permissible tuple-values for the
    ``axis`` argument for a numpy sequential function (e.g.
    :func:`numpy:numpy.sum`), given an array of the specified dimensionality.

    All tuples will have an length >= min_size and <= max_size. The default
    value for max_size is ``ndim``.

    Examples from this strategy shrink towards an empty tuple, which render
    most sequential functions as no-ops.

    The following are some examples drawn from this strategy.

    .. code-block:: pycon

        >>> [valid_tuple_axes(3).example() for i in range(4)]
        [(-3, 1), (0, 1, -1), (0, 2), (0, -2, 2)]

    ``valid_tuple_axes`` can be joined with other strategies to generate
    any type of valid axis object, i.e. integers, tuples, and ``None``:

    ..
# --- continuation of the valid_tuple_axes docstring cut above ---
code-block:: pycon

        any_axis_strategy = none() | integers(-ndim, ndim - 1) | valid_tuple_axes(ndim)
    """
    if max_size is None:
        max_size = ndim
    check_type(integer_types, ndim, "ndim")
    check_type(integer_types, min_size, "min_size")
    check_type(integer_types, max_size, "max_size")
    order_check("size", 0, min_size, max_size)
    check_valid_interval(max_size, ndim, "max_size", "ndim")

    # shrink axis values from negative to positive
    axes = st.integers(0, max(0, 2 * ndim - 1)).map(
        lambda x: x if x < ndim else x - 2 * ndim
    )
    # unique_by modulo ndim: an axis and its negative alias count as the same.
    return st.lists(axes, min_size, max_size, unique_by=lambda x: x % ndim).map(tuple)


class BroadcastShapeStrategy(SearchStrategy):
    # Strategy producing shapes broadcast-compatible with a fixed base shape;
    # argument validation and the public docs live in broadcastable_shapes().

    def __init__(self, shape, min_dims, max_dims, min_side, max_side):
        assert 0 <= min_side <= max_side
        assert 0 <= min_dims <= max_dims <= 32
        SearchStrategy.__init__(self)
        self.shape = shape
        self.side_strat = st.integers(min_side, max_side)
        self.min_dims = min_dims
        self.max_dims = max_dims
        self.min_side = min_side
        self.max_side = max_side

    def do_draw(self, data):
        # Draw sides one dimension at a time, aligned from the trailing axis
        # (numpy broadcasting aligns shapes from the right).
        elements = cu.many(
            data,
            min_size=self.min_dims,
            max_size=self.max_dims,
            average_size=min(
                max(self.min_dims * 2, self.min_dims + 5),
                0.5 * (self.min_dims + self.max_dims),
            ),
        )
        result = []
        reversed_shape = tuple(self.shape[::-1])
        while elements.more():
            if len(result) < len(self.shape):
                # Shrinks towards original shape
                if reversed_shape[len(result)] == 1:
                    if self.min_side <= 1 and not data.draw(st.booleans()):
                        side = 1
                    else:
                        side = data.draw(self.side_strat)
                elif self.max_side >= reversed_shape[len(result)] and (
                    not self.min_side <= 1 <= self.max_side or data.draw(st.booleans())
                ):
                    side = reversed_shape[len(result)]
                else:
                    side = 1
            else:
                # Beyond the base shape's length any drawn side is compatible.
                side = data.draw(self.side_strat)
            result.append(side)
        assert self.min_dims <= len(result) <= self.max_dims
        assert all(self.min_side <= s <= self.max_side for s in result)
        return tuple(reversed(result))


@st.defines_strategy
def broadcastable_shapes(shape, min_dims=0, max_dims=None, min_side=1, max_side=None):
    # type: (Shape, int, int, int,
int) -> st.SearchStrategy[Shape] """Return a strategy for generating shapes that are broadcast-compatible with the provided shape. Examples from this strategy shrink towards a shape with length ``min_dims``. The size of an aligned dimension shrinks towards being a singleton. The size of an unaligned dimension shrink towards ``min_side``. * ``shape`` a tuple of integers * ``min_dims`` The smallest length that the generated shape can possess. * ``max_dims`` The largest length that the generated shape can possess. shape can possess. Cannot exceed 32. The default-value for ``max_dims`` is ``2 + max(len(shape), min_dims)``. * ``min_side`` The smallest size that an unaligned dimension can possess. * ``max_side`` The largest size that an unaligned dimension can possess. The default value is 2 + 'size-of-largest-aligned-dimension'. The following are some examples drawn from this strategy. .. code-block:: pycon >>> [broadcastable_shapes(shape=(2, 3)).example() for i in range(5)] [(1, 3), (), (2, 3), (2, 1), (4, 1, 3), (3, )] """ check_type(tuple, shape, "shape") strict_check = max_side is None or max_dims is None check_type(integer_types, min_side, "min_side") check_type(integer_types, min_dims, "min_dims") if max_dims is None: max_dims = max(len(shape), min_dims) + 2 else: check_type(integer_types, max_dims, "max_dims") if max_side is None: max_side = max(tuple(shape[-max_dims:]) + (min_side,)) + 2 else: check_type(integer_types, max_side, "max_side") order_check("dims", 0, min_dims, max_dims) order_check("side", 0, min_side, max_side) if 32 < max_dims: raise InvalidArgument("max_dims cannot exceed 32") dims, bnd_name = (max_dims, "max_dims") if strict_check else (min_dims, "min_dims") # check for unsatisfiable min_side if not all(min_side <= s for s in shape[::-1][:dims] if s != 1): raise InvalidArgument( "Given shape=%r, there are no broadcast-compatible " "shapes that satisfy: %s=%s and min_side=%s" % (shape, bnd_name, dims, min_side) ) # check for unsatisfiable 
[min_side, max_side] if not ( min_side <= 1 <= max_side or all(s <= max_side for s in shape[::-1][:dims]) ): raise InvalidArgument( "Given shape=%r, there are no broadcast-compatible shapes " "that satisfy: %s=%s and [min_side=%s, max_side=%s]" % (shape, bnd_name, dims, min_side, max_side) ) if not strict_check: # reduce max_dims to exclude unsatisfiable dimensions for n, s in zip(range(max_dims), reversed(shape)): if s < min_side and s != 1: max_dims = n break elif not (min_side <= 1 <= max_side or s <= max_side): max_dims = n break return BroadcastShapeStrategy( shape, min_dims=min_dims, max_dims=max_dims, min_side=min_side, max_side=max_side, ) @st.defines_strategy def integer_array_indices(shape, result_shape=array_shapes(), dtype="int"): # type: (Shape, SearchStrategy[Shape], np.dtype) -> st.SearchStrategy[Tuple[np.ndarray, ...]] """Return a search strategy for tuples of integer-arrays that, when used to index into an array of shape ``shape``, given an array whose shape was drawn from ``result_shape``. Examples from this strategy shrink towards the tuple of index-arrays:: len(shape) * (np.zeros(drawn_result_shape, dtype), ) * ``shape`` a tuple of integers that indicates the shape of the array, whose indices are being generated. * ``result_shape`` a strategy for generating tuples of integers, which describe the shape of the resulting index arrays. The default is :func:`~hypothesis.extra.numpy.array_shapes`. The shape drawn from this strategy determines the shape of the array that will be produced when the corresponding example from ``integer_array_indices`` is used as an index. * ``dtype`` the integer data type of the generated index-arrays. Negative integer indices can be generated if a signed integer type is specified. Recall that an array can be indexed using a tuple of integer-arrays to access its members in an arbitrary order, producing an array with an arbitrary shape. For example: .. 
code-block:: pycon >>> from numpy import array >>> x = array([-0, -1, -2, -3, -4]) >>> ind = (array([[4, 0], [0, 1]]),) # a tuple containing a 2D integer-array >>> x[ind] # the resulting array is commensurate with the indexing array(s) array([[-4, 0], [0, -1]]) Note that this strategy does not accommodate all variations of so-called 'advanced indexing', as prescribed by NumPy's nomenclature. Combinations of basic and advanced indexes are too complex to usefully define in a standard strategy; we leave application-specific strategies to the user. Advanced-boolean indexing can be defined as ``arrays(shape=..., dtype=bool)``, and is similarly left to the user. """ check_type(tuple, shape, "shape") check_argument( shape and all(isinstance(x, integer_types) and x > 0 for x in shape), "shape=%r must be a non-empty tuple of integers > 0" % (shape,), ) check_type(SearchStrategy, result_shape, "result_shape") check_argument( np.issubdtype(dtype, np.integer), "dtype=%r must be an integer dtype" % (dtype,) ) signed = np.issubdtype(dtype, np.signedinteger) def array_for(index_shape, size): return arrays( dtype=dtype, shape=index_shape, elements=st.integers(-size if signed else 0, size - 1), ) return result_shape.flatmap( lambda index_shape: st.tuples(*[array_for(index_shape, size) for size in shape]) ) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/pandas/000077500000000000000000000000001354103617500306235ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/pandas/__init__.py000066400000000000000000000016271354103617500327420ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.extra.pandas.impl import ( column, columns, data_frames, indexes, range_indexes, series, ) __all__ = ["indexes", "range_indexes", "series", "column", "columns", "data_frames"] hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/pandas/impl.py000066400000000000000000000600271354103617500321430ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from copy import copy import attr import numpy as np import pandas import hypothesis._strategies as st import hypothesis.extra.numpy as npst import hypothesis.internal.conjecture.utils as cu from hypothesis.control import reject from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import OrderedDict, abc, hrange from hypothesis.internal.coverage import check, check_function from hypothesis.internal.validation import ( check_type, check_valid_interval, check_valid_size, try_convert, ) try: from pandas.api.types import is_categorical_dtype except ImportError: # pragma: no cover def is_categorical_dtype(dt): if isinstance(dt, np.dtype): return False return dt == "category" if False: from typing import Any, Union, Sequence, Set # noqa from hypothesis.searchstrategy.strategies import Ex # noqa def dtype_for_elements_strategy(s): return st.shared( s.map(lambda x: pandas.Series([x]).dtype), key=("hypothesis.extra.pandas.dtype_for_elements_strategy", s), ) def infer_dtype_if_necessary(dtype, values, elements, draw): if dtype is None and not values: return draw(dtype_for_elements_strategy(elements)) return dtype @check_function def elements_and_dtype(elements, dtype, source=None): if source is None: prefix = "" else: prefix = "%s." % (source,) if elements is not None: st.check_strategy(elements, "%selements" % (prefix,)) else: with check("dtype is not None"): if dtype is None: raise InvalidArgument( ( "At least one of %(prefix)selements or %(prefix)sdtype " "must be provided." 
# --- tail of elements_and_dtype(): finish validation and dtype conversion ---
                    )
                    % {"prefix": prefix}
                )

    with check("is_categorical_dtype"):
        if is_categorical_dtype(dtype):
            # Categorical dtypes need dedicated support; reject them for now.
            raise InvalidArgument(
                "%sdtype is categorical, which is currently unsupported" % (prefix,)
            )

    dtype = try_convert(np.dtype, dtype, "dtype")

    if elements is None:
        elements = npst.from_dtype(dtype)
    elif dtype is not None:

        def convert_element(value):
            # Coerce each drawn element to the requested dtype, turning
            # numpy conversion failures into friendly InvalidArgument errors.
            name = "draw(%selements)" % (prefix,)
            try:
                return np.array([value], dtype=dtype)[0]
            except TypeError:
                raise InvalidArgument(
                    "Cannot convert %s=%r of type %s to dtype %s"
                    % (name, value, type(value).__name__, dtype.str)
                )
            except ValueError:
                raise InvalidArgument(
                    "Cannot convert %s=%r to type %s" % (name, value, dtype.str)
                )

        elements = elements.map(convert_element)
    assert elements is not None

    return elements, dtype


class ValueIndexStrategy(st.SearchStrategy):
    # Draws a pandas.Index of min_size..max_size elements from `elements`,
    # optionally enforcing uniqueness and inferring the dtype if needed.

    def __init__(self, elements, dtype, min_size, max_size, unique):
        super(ValueIndexStrategy, self).__init__()
        self.elements = elements
        self.dtype = dtype
        self.min_size = min_size
        self.max_size = max_size
        self.unique = unique

    def do_draw(self, data):
        result = []
        seen = set()

        iterator = cu.many(
            data,
            min_size=self.min_size,
            max_size=self.max_size,
            average_size=(self.min_size + self.max_size) / 2,
        )

        while iterator.more():
            elt = data.draw(self.elements)

            if self.unique:
                # Reject duplicates and keep drawing rather than aborting.
                if elt in seen:
                    iterator.reject()
                    continue
                seen.add(elt)
            result.append(elt)

        dtype = infer_dtype_if_necessary(
            dtype=self.dtype, values=result, elements=self.elements, draw=data.draw
        )
        # tupleize_cols=False keeps tuple elements as tuples rather than
        # promoting the result to a MultiIndex.
        return pandas.Index(result, dtype=dtype, tupleize_cols=False)


DEFAULT_MAX_SIZE = 10


@st.cacheable
@st.defines_strategy
def range_indexes(min_size=0, max_size=None):
    # type: (int, int) -> st.SearchStrategy[pandas.RangeIndex]
    """Provides a strategy which generates an :class:`~pandas.Index` whose
    values are 0, 1, ..., n for some n.

    Arguments:

    * min_size is the smallest number of elements the index can have.
    * max_size is the largest number of elements the index can have. If None
      it will default to some suitable value based on min_size.
""" check_valid_size(min_size, "min_size") check_valid_size(max_size, "max_size") if max_size is None: max_size = min([min_size + DEFAULT_MAX_SIZE, 2 ** 63 - 1]) check_valid_interval(min_size, max_size, "min_size", "max_size") return st.integers(min_size, max_size).map(pandas.RangeIndex) @st.cacheable @st.defines_strategy def indexes( elements=None, # type: st.SearchStrategy[Ex] dtype=None, # type: Any min_size=0, # type: int max_size=None, # type: int unique=True, # type: bool ): """Provides a strategy for producing a :class:`pandas.Index`. Arguments: * elements is a strategy which will be used to generate the individual values of the index. If None, it will be inferred from the dtype. Note: even if the elements strategy produces tuples, the generated value will not be a MultiIndex, but instead be a normal index whose elements are tuples. * dtype is the dtype of the resulting index. If None, it will be inferred from the elements strategy. At least one of dtype or elements must be provided. * min_size is the minimum number of elements in the index. * max_size is the maximum number of elements in the index. If None then it will default to a suitable small size. If you want larger indexes you should pass a max_size explicitly. * unique specifies whether all of the elements in the resulting index should be distinct. """ check_valid_size(min_size, "min_size") check_valid_size(max_size, "max_size") check_valid_interval(min_size, max_size, "min_size", "max_size") check_type(bool, unique, "unique") elements, dtype = elements_and_dtype(elements, dtype) if max_size is None: max_size = min_size + DEFAULT_MAX_SIZE return ValueIndexStrategy(elements, dtype, min_size, max_size, unique) @st.defines_strategy def series( elements=None, # type: st.SearchStrategy[Ex] dtype=None, # type: Any index=None, # type: st.SearchStrategy[Union[Sequence, pandas.Index]] fill=None, # type: st.SearchStrategy[Ex] unique=False, # type: bool ): # type: (...) 
-> st.SearchStrategy[pandas.Series] """Provides a strategy for producing a :class:`pandas.Series`. Arguments: * elements: a strategy that will be used to generate the individual values in the series. If None, we will attempt to infer a suitable default from the dtype. * dtype: the dtype of the resulting series and may be any value that can be passed to :class:`numpy.dtype`. If None, will use pandas's standard behaviour to infer it from the type of the elements values. Note that if the type of values that comes out of your elements strategy varies, then so will the resulting dtype of the series. * index: If not None, a strategy for generating indexes for the resulting Series. This can generate either :class:`pandas.Index` objects or any sequence of values (which will be passed to the Index constructor). You will probably find it most convenient to use the :func:`~hypothesis.extra.pandas.indexes` or :func:`~hypothesis.extra.pandas.range_indexes` function to produce values for this argument. Usage: .. code-block:: pycon >>> series(dtype=int).example() 0 -2001747478 1 1153062837 """ if index is None: index = range_indexes() else: st.check_strategy(index) elements, dtype = elements_and_dtype(elements, dtype) index_strategy = index @st.composite def result(draw): index = draw(index_strategy) if len(index) > 0: if dtype is not None: result_data = draw( npst.arrays( dtype=dtype, elements=elements, shape=len(index), fill=fill, unique=unique, ) ) else: result_data = list( draw( npst.arrays( dtype=object, elements=elements, shape=len(index), fill=fill, unique=unique, ) ) ) return pandas.Series(result_data, index=index, dtype=dtype) else: return pandas.Series( (), index=index, dtype=dtype if dtype is not None else draw(dtype_for_elements_strategy(elements)), ) return result() @attr.s(slots=True) class column(object): """Data object for describing a column in a DataFrame. Arguments: * name: the column name, or None to default to the column position. 
Must be hashable, but can otherwise be any value supported as a pandas column name. * elements: the strategy for generating values in this column, or None to infer it from the dtype. * dtype: the dtype of the column, or None to infer it from the element strategy. At least one of dtype or elements must be provided. * fill: A default value for elements of the column. See :func:`~hypothesis.extra.numpy.arrays` for a full explanation. * unique: If all values in this column should be distinct. """ name = attr.ib(default=None) elements = attr.ib(default=None) dtype = attr.ib(default=None) fill = attr.ib(default=None) unique = attr.ib(default=False) def columns( names_or_number, # type: Union[int, Sequence[str]] dtype=None, # type: Any elements=None, # type: st.SearchStrategy[Ex] fill=None, # type: st.SearchStrategy[Ex] unique=False, # type: bool ): """A convenience function for producing a list of :class:`column` objects of the same general shape. The names_or_number argument is either a sequence of values, the elements of which will be used as the name for individual column objects, or a number, in which case that many unnamed columns will be created. All other arguments are passed through verbatim to create the columns. """ if isinstance(names_or_number, (int, float)): names = [None] * names_or_number # type: list else: names = list(names_or_number) return [ column(name=n, dtype=dtype, elements=elements, fill=fill, unique=unique) for n in names ] @st.defines_strategy def data_frames( columns=None, # type: Sequence[column] rows=None, # type: st.SearchStrategy[Union[dict, Sequence[Any]]] index=None, # type: st.SearchStrategy[Ex] ): # type: (...) -> st.SearchStrategy[pandas.DataFrame] """Provides a strategy for producing a :class:`pandas.DataFrame`. Arguments: * columns: An iterable of :class:`column` objects describing the shape of the generated DataFrame. * rows: A strategy for generating a row object. 
Should generate either dicts mapping column names to values or a sequence mapping column position to the value in that position (note that unlike the :class:`pandas.DataFrame` constructor, single values are not allowed here. Passing e.g. an integer is an error, even if there is only one column). At least one of rows and columns must be provided. If both are provided then the generated rows will be validated against the columns and an error will be raised if they don't match. Caveats on using rows: * In general you should prefer using columns to rows, and only use rows if the columns interface is insufficiently flexible to describe what you need - you will get better performance and example quality that way. * If you provide rows and not columns, then the shape and dtype of the resulting DataFrame may vary. e.g. if you have a mix of int and float in the values for one column in your row entries, the column will sometimes have an integral dtype and sometimes a float. * index: If not None, a strategy for generating indexes for the resulting DataFrame. This can generate either :class:`pandas.Index` objects or any sequence of values (which will be passed to the Index constructor). You will probably find it most convenient to use the :func:`~hypothesis.extra.pandas.indexes` or :func:`~hypothesis.extra.pandas.range_indexes` function to produce values for this argument. Usage: The expected usage pattern is that you use :class:`column` and :func:`columns` to specify a fixed shape of the DataFrame you want as follows. For example the following gives a two column data frame: .. code-block:: pycon >>> from hypothesis.extra.pandas import column, data_frames >>> data_frames([ ... column('A', dtype=int), column('B', dtype=float)]).example() A B 0 2021915903 1.793898e+232 1 1146643993 inf 2 -2096165693 1.000000e+07 If you want the values in different columns to interact in some way you can use the rows argument. 
For example the following gives a two column DataFrame where the value in the first column is always at most the value in the second: .. code-block:: pycon >>> from hypothesis.extra.pandas import column, data_frames >>> import hypothesis.strategies as st >>> data_frames( ... rows=st.tuples(st.floats(allow_nan=False), ... st.floats(allow_nan=False)).map(sorted) ... ).example() 0 1 0 -3.402823e+38 9.007199e+15 1 -1.562796e-298 5.000000e-01 You can also combine the two: .. code-block:: pycon >>> from hypothesis.extra.pandas import columns, data_frames >>> import hypothesis.strategies as st >>> data_frames( ... columns=columns(["lo", "hi"], dtype=float), ... rows=st.tuples(st.floats(allow_nan=False), ... st.floats(allow_nan=False)).map(sorted) ... ).example() lo hi 0 9.314723e-49 4.353037e+45 1 -9.999900e-01 1.000000e+07 2 -2.152861e+134 -1.069317e-73 (Note that the column dtype must still be specified and will not be inferred from the rows. This restriction may be lifted in future). Combining rows and columns has the following behaviour: * The column names and dtypes will be used. * If the column is required to be unique, this will be enforced. * Any values missing from the generated rows will be provided using the column's fill. * Any values in the row not present in the column specification (if dicts are passed, if there are keys with no corresponding column name, if sequences are passed if there are too many items) will result in InvalidArgument being raised. 
""" if index is None: index = range_indexes() else: st.check_strategy(index) index_strategy = index if columns is None: if rows is None: raise InvalidArgument("At least one of rows and columns must be provided") else: @st.composite def rows_only(draw): index = draw(index_strategy) @check_function def row(): result = draw(rows) check_type(abc.Iterable, result, "draw(row)") return result if len(index) > 0: return pandas.DataFrame([row() for _ in index], index=index) else: # If we haven't drawn any rows we need to draw one row and # then discard it so that we get a consistent shape for the # DataFrame. base = pandas.DataFrame([row()]) return base.drop(0) return rows_only() assert columns is not None cols = try_convert(tuple, columns, "columns") # type: Sequence[column] rewritten_columns = [] column_names = set() # type: Set[str] for i, c in enumerate(cols): check_type(column, c, "columns[%d]" % (i,)) c = copy(c) if c.name is None: label = "columns[%d]" % (i,) c.name = i else: label = c.name try: hash(c.name) except TypeError: raise InvalidArgument( "Column names must be hashable, but columns[%d].name was " "%r of type %s, which cannot be hashed." % (i, c.name, type(c.name).__name__) ) if c.name in column_names: raise InvalidArgument("duplicate definition of column name %r" % (c.name,)) column_names.add(c.name) c.elements, c.dtype = elements_and_dtype(c.elements, c.dtype, label) if c.dtype is None and rows is not None: raise InvalidArgument( "Must specify a dtype for all columns when combining rows with" " columns." ) c.fill = npst.fill_for( fill=c.fill, elements=c.elements, unique=c.unique, name=label ) rewritten_columns.append(c) if rows is None: @st.composite def just_draw_columns(draw): index = draw(index_strategy) local_index_strategy = st.just(index) data = OrderedDict((c.name, None) for c in rewritten_columns) # Depending on how the columns are going to be generated we group # them differently to get better shrinking. 
For columns with fill # enabled, the elements can be shrunk independently of the size, # so we can just shrink by shrinking the index then shrinking the # length and are generally much more free to move data around. # For columns with no filling the problem is harder, and drawing # them like that would result in rows being very far apart from # each other in the underlying data stream, which gets in the way # of shrinking. So what we do is reorder and draw those columns # row wise, so that the values of each row are next to each other. # This makes life easier for the shrinker when deleting blocks of # data. columns_without_fill = [c for c in rewritten_columns if c.fill.is_empty] if columns_without_fill: for c in columns_without_fill: data[c.name] = pandas.Series( np.zeros(shape=len(index), dtype=c.dtype), index=index ) seen = {c.name: set() for c in columns_without_fill if c.unique} for i in hrange(len(index)): for c in columns_without_fill: if c.unique: for _ in range(5): value = draw(c.elements) if value not in seen[c.name]: seen[c.name].add(value) break else: reject() else: value = draw(c.elements) data[c.name][i] = value for c in rewritten_columns: if not c.fill.is_empty: data[c.name] = draw( series( index=local_index_strategy, dtype=c.dtype, elements=c.elements, fill=c.fill, unique=c.unique, ) ) return pandas.DataFrame(data, index=index) return just_draw_columns() else: @st.composite def assign_rows(draw): index = draw(index_strategy) result = pandas.DataFrame( OrderedDict( ( c.name, pandas.Series( np.zeros(dtype=c.dtype, shape=len(index)), dtype=c.dtype ), ) for c in rewritten_columns ), index=index, ) fills = {} any_unique = any(c.unique for c in rewritten_columns) if any_unique: all_seen = [set() if c.unique else None for c in rewritten_columns] while all_seen[-1] is None: all_seen.pop() for row_index in hrange(len(index)): for _ in hrange(5): original_row = draw(rows) row = original_row if isinstance(row, dict): as_list = [None] * len(rewritten_columns) 
for i, c in enumerate(rewritten_columns): try: as_list[i] = row[c.name] except KeyError: try: as_list[i] = fills[i] except KeyError: fills[i] = draw(c.fill) as_list[i] = fills[i] for k in row: if k not in column_names: raise InvalidArgument( "Row %r contains column %r not in columns %r)" % (row, k, [c.name for c in rewritten_columns]) ) row = as_list if any_unique: has_duplicate = False for seen, value in zip(all_seen, row): if seen is None: continue if value in seen: has_duplicate = True break seen.add(value) if has_duplicate: continue row = list(try_convert(tuple, row, "draw(rows)")) if len(row) > len(rewritten_columns): raise InvalidArgument( ( "Row %r contains too many entries. Has %d but " "expected at most %d" ) % (original_row, len(row), len(rewritten_columns)) ) while len(row) < len(rewritten_columns): row.append(draw(rewritten_columns[len(row)].fill)) result.iloc[row_index] = row break else: reject() return result return assign_rows() hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/pytestplugin.py000066400000000000000000000161041354103617500325000ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from distutils.version import LooseVersion import pytest from hypothesis import Verbosity, core, settings from hypothesis._settings import note_deprecation from hypothesis.internal.compat import OrderedDict, text_type from hypothesis.internal.detection import is_hypothesis_test from hypothesis.reporting import default as default_reporter, with_reporter from hypothesis.statistics import collector LOAD_PROFILE_OPTION = "--hypothesis-profile" VERBOSITY_OPTION = "--hypothesis-verbosity" PRINT_STATISTICS_OPTION = "--hypothesis-show-statistics" SEED_OPTION = "--hypothesis-seed" class StoringReporter(object): def __init__(self, config): self.config = config self.results = [] def __call__(self, msg): if self.config.getoption("capture", "fd") == "no": default_reporter(msg) if not isinstance(msg, text_type): msg = repr(msg) self.results.append(msg) def pytest_addoption(parser): group = parser.getgroup("hypothesis", "Hypothesis") group.addoption( LOAD_PROFILE_OPTION, action="store", help="Load in a registered hypothesis.settings profile", ) group.addoption( VERBOSITY_OPTION, action="store", choices=[opt.name for opt in Verbosity], help="Override profile with verbosity setting specified", ) group.addoption( PRINT_STATISTICS_OPTION, action="store_true", help="Configure when statistics are printed", default=False, ) group.addoption( SEED_OPTION, action="store", help="Set a seed to use for all Hypothesis tests" ) def pytest_report_header(config): profile = config.getoption(LOAD_PROFILE_OPTION) if not profile: profile = settings._current_profile settings_str = settings.get_profile(profile).show_changed() if settings_str != "": settings_str = " -> %s" % (settings_str) return "hypothesis profile %r%s" % (profile, settings_str) def pytest_configure(config): core.running_under_pytest = True profile = config.getoption(LOAD_PROFILE_OPTION) if profile: settings.load_profile(profile) verbosity_name = 
config.getoption(VERBOSITY_OPTION) if verbosity_name: verbosity_value = Verbosity[verbosity_name] profile_name = "%s-with-%s-verbosity" % ( settings._current_profile, verbosity_name, ) # register_profile creates a new profile, exactly like the current one, # with the extra values given (in this case 'verbosity') settings.register_profile(profile_name, verbosity=verbosity_value) settings.load_profile(profile_name) seed = config.getoption(SEED_OPTION) if seed is not None: try: seed = int(seed) except ValueError: pass core.global_force_seed = seed config.addinivalue_line("markers", "hypothesis: Tests which use hypothesis.") gathered_statistics = OrderedDict() # type: dict @pytest.hookimpl(hookwrapper=True) def pytest_runtest_call(item): if not (hasattr(item, "obj") and is_hypothesis_test(item.obj)): yield else: store = StoringReporter(item.config) def note_statistics(stats): lines = [item.nodeid + ":", ""] + stats.get_description() + [""] gathered_statistics[item.nodeid] = lines item.hypothesis_statistics = lines with collector.with_value(note_statistics): with with_reporter(store): yield if store.results: item.hypothesis_report_information = list(store.results) @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): report = (yield).get_result() if hasattr(item, "hypothesis_report_information"): report.sections.append( ("Hypothesis", "\n".join(item.hypothesis_report_information)) ) if hasattr(item, "hypothesis_statistics") and report.when == "teardown": # Running on pytest < 3.5 where user_properties doesn't exist, fall # back on the global gathered_statistics (which breaks under xdist) if hasattr(report, "user_properties"): # pragma: no branch val = ("hypothesis-stats", item.hypothesis_statistics) # Workaround for https://github.com/pytest-dev/pytest/issues/4034 if isinstance(report.user_properties, tuple): report.user_properties += (val,) else: report.user_properties.append(val) def pytest_terminal_summary(terminalreporter): if not 
terminalreporter.config.getoption(PRINT_STATISTICS_OPTION): return terminalreporter.section("Hypothesis Statistics") if LooseVersion(pytest.__version__) < "3.5": # pragma: no cover if not gathered_statistics: terminalreporter.write_line( "Reporting Hypothesis statistics with pytest-xdist enabled " "requires pytest >= 3.5" ) for lines in gathered_statistics.values(): for li in lines: terminalreporter.write_line(li) return # terminalreporter.stats is a dict, where the empty string appears to # always be the key for a list of _pytest.reports.TestReport objects # (where we stored the statistics data in pytest_runtest_makereport above) for test_report in terminalreporter.stats.get("", []): for name, lines in test_report.user_properties: if name == "hypothesis-stats" and test_report.when == "teardown": for li in lines: terminalreporter.write_line(li) def pytest_collection_modifyitems(items): for item in items: if not isinstance(item, pytest.Function): continue if is_hypothesis_test(item.obj): item.add_marker("hypothesis") if getattr(item.obj, "is_hypothesis_strategy_function", False): def note_strategy_is_not_test(*args, **kwargs): note_deprecation( "%s is a function that returns a Hypothesis strategy, " "but pytest has collected it as a test function. This " "is useless as the function body will never be executed. " "To define a test function, use @given instead of " "@composite." % (item.nodeid,), since="2018-11-02", ) item.obj = note_strategy_is_not_test def load(): pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/extra/pytz.py000066400000000000000000000040711354103617500307370ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
@st.cacheable
@st.defines_strategy
def timezones():
    # type: () -> st.SearchStrategy[dt.tzinfo]
    """Any timezone in the Olsen database, as a pytz tzinfo object.

    This strategy minimises to UTC, or the smallest possible fixed
    offset, and is designed for use with
    :py:func:`hypothesis.strategies.datetimes`.
    """
    reference = dt.datetime(2000, 1, 1)
    # Partition the database: zones whose UTC offset has never changed are
    # simpler than ones with daylight savings or historical shifts, and the
    # smaller the absolute offset the simpler they are.  UTC is simplest.
    fixed_offset = []
    changing = []
    for name in pytz.all_timezones:
        tz = pytz.timezone(name)
        if isinstance(tz, StaticTzInfo):
            fixed_offset.append(tz)
        else:
            changing.append(tz)
    fixed_offset.sort(key=lambda tz: abs(tz.utcoffset(reference)))
    static = [pytz.UTC]  # type: list
    static += fixed_offset
    # Timezones which have changed UTC offset; best ordered by name.
    dynamic = [tz for tz in changing if tz not in static]
    return st.sampled_from(static + dynamic)
class Entry(object):
    """A single cache slot: a key/value pair plus its eviction score.

    A plain ``__slots__`` class (replacing the previous attrs-based
    definition, with identical field names, order and default) so that
    the cache machinery has no third-party dependency.
    """

    __slots__ = ("key", "value", "score", "pins")

    def __init__(self, key, value, score, pins=0):
        self.key = key
        self.value = value
        self.score = score
        # Number of outstanding pin() calls; an entry with pins > 0 may
        # not be evicted.
        self.pins = pins

    def __repr__(self):
        return "Entry(key=%r, value=%r, score=%r, pins=%r)" % (
            self.key,
            self.value,
            self.score,
            self.pins,
        )

    @property
    def sort_key(self):
        if self.pins == 0:
            # Unpinned entries are sorted by score.
            return (0, self.score)
        else:
            # Pinned entries sort after unpinned ones. Beyond that, we don't
            # worry about their relative order.
            return (1,)


class GenericCache(object):
    """Generic supertype for cache implementations.

    Defines a dict-like mapping with a maximum size, where as well as mapping
    to a value, each key also maps to a score. When a write would cause the
    dict to exceed its maximum size, it first evicts the existing key with
    the smallest score, then adds the new key to the map.

    A key has the following lifecycle:

    1. key is written for the first time, the key is given the score
       self.new_entry(key, value)
    2. whenever an existing key is read or written, self.on_access(key, value,
       score) is called. This returns a new score for the key.
    3. When a key is evicted, self.on_evict(key, value, score) is called.

    The cache will be in a valid state in all of these cases.

    Implementations are expected to implement new_entry and optionally
    on_access and on_evict to implement a specific scoring strategy.
    """

    __slots__ = ("keys_to_indices", "data", "max_size", "__pinned_entry_count")

    def __init__(self, max_size):
        self.max_size = max_size

        # Implementation: We store a binary heap of Entry objects in self.data,
        # with the heap property requiring that a parent's score is <= that of
        # its children. keys_to_index then maps keys to their index in the
        # heap. We keep these two in sync automatically - the heap is never
        # reordered without updating the index.
        self.keys_to_indices = {}
        self.data = []
        self.__pinned_entry_count = 0

    def __len__(self):
        assert len(self.keys_to_indices) == len(self.data)
        return len(self.data)

    def __contains__(self, key):
        return key in self.keys_to_indices

    def __getitem__(self, key):
        i = self.keys_to_indices[key]
        result = self.data[i]
        # Bug fix: store the updated score returned by on_access.  It was
        # previously discarded, so subclasses whose on_access returns a new
        # score object (rather than mutating the old one in place, as
        # LRUReusedCache happens to do) never saw reads affect eviction
        # order.  Compare __setitem__, which has always assigned it.
        result.score = self.on_access(result.key, result.value, result.score)
        self.__balance(i)
        return result.value

    def __setitem__(self, key, value):
        if self.max_size == 0:
            return
        evicted = None
        try:
            i = self.keys_to_indices[key]
        except KeyError:
            if self.max_size == self.__pinned_entry_count:
                raise ValueError(
                    "Cannot increase size of cache where all keys have been pinned."
                )
            entry = Entry(key, value, self.new_entry(key, value))
            if len(self.data) >= self.max_size:
                # Full: overwrite the root of the heap, which (as unpinned
                # entries sort first) is the smallest-scored evictable entry.
                evicted = self.data[0]
                assert evicted.pins == 0
                del self.keys_to_indices[evicted.key]
                i = 0
                self.data[0] = entry
            else:
                i = len(self.data)
                self.data.append(entry)
            self.keys_to_indices[key] = i
        else:
            # Key already present: update its value and refresh its score.
            entry = self.data[i]
            assert entry.key == key
            entry.value = value
            entry.score = self.on_access(entry.key, entry.value, entry.score)
        self.__balance(i)
        if evicted is not None:
            if self.data[0] is not entry:
                assert evicted.score <= self.data[0].score
            self.on_evict(evicted.key, evicted.value, evicted.score)

    def __iter__(self):
        return iter(self.keys_to_indices)

    def pin(self, key):
        """Mark ``key`` as pinned. That is, it may not be evicted until
        ``unpin(key)`` has been called. The same key may be pinned multiple
        times and will not be unpinned until the same number of calls to
        unpin have been made."""
        i = self.keys_to_indices[key]
        entry = self.data[i]
        entry.pins += 1
        if entry.pins == 1:
            self.__pinned_entry_count += 1
            assert self.__pinned_entry_count <= self.max_size
            self.__balance(i)

    def unpin(self, key):
        """Undo one previous call to ``pin(key)``.

        Once all calls are undone this key may be evicted as normal."""
        i = self.keys_to_indices[key]
        entry = self.data[i]
        if entry.pins == 0:
            raise ValueError("Key %r has not been pinned" % (key,))
        entry.pins -= 1
        if entry.pins == 0:
            self.__pinned_entry_count -= 1
            self.__balance(i)

    def is_pinned(self, key):
        """Returns True if the key is currently pinned."""
        i = self.keys_to_indices[key]
        return self.data[i].pins > 0

    def clear(self):
        """Remove all keys, clearing their pinned status."""
        del self.data[:]
        self.keys_to_indices.clear()
        self.__pinned_entry_count = 0

    def __repr__(self):
        return "{%s}" % (", ".join("%r: %r" % (e.key, e.value) for e in self.data),)

    def new_entry(self, key, value):
        """Called when a key is written that does not currently appear in the
        map.

        Returns the score to associate with the key.
        """
        raise NotImplementedError()

    def on_access(self, key, value, score):
        """Called every time a key that is already in the map is read or
        written.

        Returns the new score for the key.
        """
        return score

    def on_evict(self, key, value, score):
        """Called after a key has been evicted, with the score it had had at
        the point of eviction."""

    def check_valid(self):
        """Debugging method for use in tests.

        Asserts that all of the cache's invariants hold. When everything
        is working correctly this should be an expensive no-op.
        """
        for i, e in enumerate(self.data):
            assert self.keys_to_indices[e.key] == i
            for j in [i * 2 + 1, i * 2 + 2]:
                if j < len(self.data):
                    assert e.score <= self.data[j].score, self.data

    def __swap(self, i, j):
        assert i < j
        assert self.data[j].sort_key < self.data[i].sort_key
        self.data[i], self.data[j] = self.data[j], self.data[i]
        self.keys_to_indices[self.data[i].key] = i
        self.keys_to_indices[self.data[j].key] = j

    def __balance(self, i):
        """When we have made a modification to the heap such that means that
        the heap property has been violated locally around i but previously
        held for all other indexes (and no other values have been modified),
        this fixes the heap so that the heap property holds everywhere."""
        # Sift up while smaller than the parent...
        while i > 0:
            parent = (i - 1) // 2
            if self.__out_of_order(parent, i):
                self.__swap(parent, i)
                i = parent
            else:
                break
        # ...then sift down while larger than a child.
        while True:
            children = [j for j in (2 * i + 1, 2 * i + 2) if j < len(self.data)]
            if len(children) == 2:
                children.sort(key=lambda j: self.data[j].score)
            for j in children:
                if self.__out_of_order(i, j):
                    self.__swap(i, j)
                    i = j
                    break
            else:
                break

    def __out_of_order(self, i, j):
        """Returns True if the indices i, j are in the wrong order.

        i must be the parent of j.
        """
        assert i == (j - 1) // 2
        return self.data[j].sort_key < self.data[i].sort_key
def cathetus(h, a):
    """Given the lengths of the hypotenuse and a side of a right triangle,
    return the length of the other side.

    A companion to the C99 hypot() function.  Some care is needed to avoid
    underflow for small arguments and overflow for large ones, as would
    occur with the naive implementation ``sqrt(h*h - a*a)``.

    The behaviour with respect to non-finite arguments (NaNs and
    infinities) is designed to be as consistent as possible with the C99
    hypot() specifications.

    This function relies on the system ``sqrt`` function and so, like it,
    may be inaccurate up to a relative error of (around) floating-point
    epsilon.

    Based on the C99 implementation https://github.com/jjgreen/cathetus
    """
    if isnan(h):
        return float(u"nan")
    if isinf(h):
        if isinf(a):
            return float(u"nan")
        # Deliberately includes the case when isnan(a), because the
        # C99 standard mandates that hypot(inf, nan) == inf
        return float(u"inf")

    h, a = fabs(h), fabs(a)
    if h < a:
        # No side of a right triangle exceeds the hypotenuse.
        return float(u"nan")

    if h > sqrt(float_info.max):
        # Overflow territory: factor as sqrt(h-a)*sqrt(h+a), and for truly
        # huge h pull out an extra sqrt(2) so that h+a cannot overflow.
        if h > float_info.max / 2:
            side = sqrt(h - a) * sqrt(h / 2 + a / 2) * sqrt(2)
        else:
            side = sqrt(h - a) * sqrt(h + a)
    elif h < sqrt(float_info.min):
        # Underflow territory: (h-a)*(h+a) could underflow to zero, so
        # take the square roots before multiplying.
        side = sqrt(h - a) * sqrt(h + a)
    else:
        side = sqrt((h - a) * (h + a))

    # Thanks to floating-point precision issues when performing multiple
    # operations on extremely large or small values, we may rarely calculate
    # a side length that is longer than the hypotenuse. This is clearly an
    # error, so we clip to the hypotenuse as the best available estimate.
    return min(side, h)
# # END HEADER from __future__ import absolute_import, division, print_function import gzip import json import os import sys import tempfile import unicodedata from hypothesis.configuration import storage_directory, tmpdir from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import hunichr if False: from typing import Dict, Tuple intervals = Tuple[Tuple[int, int], ...] cache_type = Dict[Tuple[Tuple[str, ...], int, int, intervals], intervals] def charmap_file(): return os.path.join( storage_directory("unicodedata", unicodedata.unidata_version), "charmap.json.gz" ) _charmap = None def charmap(): """Return a dict that maps a Unicode category, to a tuple of 2-tuples covering the codepoint intervals for characters in that category. >>> charmap()['Co'] ((57344, 63743), (983040, 1048573), (1048576, 1114109)) """ global _charmap # Best-effort caching in the face of missing files and/or unwritable # filesystems is fairly simple: check if loaded, else try loading, # else calculate and try writing the cache. if _charmap is None: f = charmap_file() try: with gzip.GzipFile(f, "rb") as i: tmp_charmap = dict(json.loads(i)) except Exception: tmp_charmap = {} for i in range(0, sys.maxunicode + 1): cat = unicodedata.category(hunichr(i)) rs = tmp_charmap.setdefault(cat, []) if rs and rs[-1][-1] == i - 1: rs[-1][-1] += 1 else: rs.append([i, i]) try: # Write the Unicode table atomically fd, tmpfile = tempfile.mkstemp(dir=tmpdir()) os.close(fd) # Explicitly set the mtime to get reproducible output with gzip.GzipFile(tmpfile, "wb", mtime=1) as o: result = json.dumps(sorted(tmp_charmap.items())) o.write(result.encode()) os.rename(tmpfile, f) except Exception: pass # convert between lists and tuples _charmap = { k: tuple(tuple(pair) for pair in pairs) for k, pairs in tmp_charmap.items() } # each value is a tuple of 2-tuples (that is, tuples of length 2) # and that both elements of that tuple are integers. 
for vs in _charmap.values(): ints = list(sum(vs, ())) assert all([isinstance(x, int) for x in ints]) assert ints == sorted(ints) assert all([len(tup) == 2 for tup in vs]) assert _charmap is not None return _charmap _categories = None def categories(): """Return a tuple of Unicode categories in a normalised order. >>> categories() # doctest: +ELLIPSIS ('Zl', 'Zp', 'Co', 'Me', 'Pc', ..., 'Cc', 'Cs') """ global _categories if _categories is None: cm = charmap() _categories = sorted(cm.keys(), key=lambda c: len(cm[c])) _categories.remove("Cc") # Other, Control _categories.remove("Cs") # Other, Surrogate _categories.append("Cc") _categories.append("Cs") return tuple(_categories) def as_general_categories(cats, name="cats"): """Return a tuple of Unicode categories in a normalised order. This function expands one-letter designations of a major class to include all subclasses: >>> as_general_categories(['N']) ('Nd', 'Nl', 'No') See section 4.5 of the Unicode standard for more on classes: https://www.unicode.org/versions/Unicode10.0.0/ch04.pdf If the collection ``cats`` includes any elements that do not represent a major class or a class with subclass, a deprecation warning is raised. """ if cats is None: return None major_classes = ("L", "M", "N", "P", "S", "Z", "C") cs = categories() out = set(cats) for c in cats: if c in major_classes: out.discard(c) out.update(x for x in cs if x.startswith(c)) elif c not in cs: raise InvalidArgument( "In %s=%r, %r is not a valid Unicode category." % (name, cats, c) ) return tuple(c for c in cs if c in out) def _union_intervals(x, y): """Merge two sequences of intervals into a single tuple of intervals. Any integer bounded by `x` or `y` is also bounded by the result. >>> _union_intervals([(3, 10)], [(1, 2), (5, 17)]) ((1, 17),) """ if not x: return tuple((u, v) for u, v in y) if not y: return tuple((u, v) for u, v in x) intervals = sorted(x + y, reverse=True) result = [intervals.pop()] while intervals: # 1. 
intervals is in descending order # 2. pop() takes from the RHS. # 3. (a, b) was popped 1st, then (u, v) was popped 2nd # 4. Therefore: a <= u # 5. We assume that u <= v and a <= b # 6. So we need to handle 2 cases of overlap, and one disjoint case # | u--v | u----v | u--v | # | a----b | a--b | a--b | u, v = intervals.pop() a, b = result[-1] if u <= b + 1: # Overlap cases result[-1] = (a, max(v, b)) else: # Disjoint case result.append((u, v)) return tuple(result) def _subtract_intervals(x, y): """Set difference for lists of intervals. That is, returns a list of intervals that bounds all values bounded by x that are not also bounded by y. x and y are expected to be in sorted order. For example _subtract_intervals([(1, 10)], [(2, 3), (9, 15)]) would return [(1, 1), (4, 8)], removing the values 2, 3, 9 and 10 from the interval. """ if not y: return tuple(x) x = list(map(list, x)) i = 0 j = 0 result = [] while i < len(x) and j < len(y): # Iterate in parallel over x and y. j stays pointing at the smallest # interval in the left hand side that could still overlap with some # element of x at index >= i. # Similarly, i is not incremented until we know that it does not # overlap with any element of y at index >= j. xl, xr = x[i] assert xl <= xr yl, yr = y[j] assert yl <= yr if yr < xl: # The interval at y[j] is strictly to the left of the interval at # x[i], so will not overlap with it or any later interval of x. j += 1 elif yl > xr: # The interval at y[j] is strictly to the right of the interval at # x[i], so all of x[i] goes into the result as no further intervals # in y will intersect it. result.append(x[i]) i += 1 elif yl <= xl: if yr >= xr: # x[i] is contained entirely in y[j], so we just skip over it # without adding it to the result. i += 1 else: # The beginning of x[i] is contained in y[j], so we update the # left endpoint of x[i] to remove this, and increment j as we # now have moved past it. 
Note that this is not added to the # result as is, as more intervals from y may intersect it so it # may need updating further. x[i][0] = yr + 1 j += 1 else: # yl > xl, so the left hand part of x[i] is not contained in y[j], # so there are some values we should add to the result. result.append((xl, yl - 1)) if yr + 1 <= xr: # If y[j] finishes before x[i] does, there may be some values # in x[i] left that should go in the result (or they may be # removed by a later interval in y), so we update x[i] to # reflect that and increment j because it no longer overlaps # with any remaining element of x. x[i][0] = yr + 1 j += 1 else: # Every element of x[i] other than the initial part we have # already added is contained in y[j], so we move to the next # interval. i += 1 # Any remaining intervals in x do not overlap with any of y, as if they did # we would not have incremented j to the end, so can be added to the result # as they are. result.extend(x[i:]) return tuple(map(tuple, result)) def _intervals(s): """Return a tuple of intervals, covering the codepoints of characters in `s`. >>> _intervals('abcdef0123456789') ((48, 57), (97, 102)) """ intervals = tuple((ord(c), ord(c)) for c in sorted(s)) return _union_intervals(intervals, intervals) category_index_cache = {(): ()} def _category_key(exclude, include): """Return a normalised tuple of all Unicode categories that are in `include`, but not in `exclude`. If include is None then default to including all categories. Any item in include that is not a unicode character will be excluded. 
>>> _category_key(exclude=['So'], include=['Lu', 'Me', 'Cs', 'So']) ('Me', 'Lu', 'Cs') """ cs = categories() if include is None: include = set(cs) else: include = set(include) exclude = set(exclude or ()) assert include.issubset(cs) assert exclude.issubset(cs) include -= exclude result = tuple(c for c in cs if c in include) return result def _query_for_key(key): """Return a tuple of codepoint intervals covering characters that match one or more categories in the tuple of categories `key`. >>> _query_for_key(categories()) ((0, 1114111),) >>> _query_for_key(('Zl', 'Zp', 'Co')) ((8232, 8233), (57344, 63743), (983040, 1048573), (1048576, 1114109)) """ try: return category_index_cache[key] except KeyError: pass assert key if set(key) == set(categories()): result = ((0, sys.maxunicode),) else: result = _union_intervals(_query_for_key(key[:-1]), charmap()[key[-1]]) category_index_cache[key] = result return result limited_category_index_cache = {} # type: cache_type def query( exclude_categories=(), include_categories=None, min_codepoint=None, max_codepoint=None, include_characters="", exclude_characters="", ): """Return a tuple of intervals covering the codepoints for all characters that meet the critera (min_codepoint <= codepoint(c) <= max_codepoint and any(cat in include_categories for cat in categories(c)) and all(cat not in exclude_categories for cat in categories(c)) or (c in include_characters) >>> query() ((0, 1114111),) >>> query(min_codepoint=0, max_codepoint=128) ((0, 128),) >>> query(min_codepoint=0, max_codepoint=128, include_categories=['Lu']) ((65, 90),) >>> query(min_codepoint=0, max_codepoint=128, include_categories=['Lu'], ... 
include_characters=u'☃') ((65, 90), (9731, 9731)) """ if min_codepoint is None: min_codepoint = 0 if max_codepoint is None: max_codepoint = sys.maxunicode catkey = _category_key(exclude_categories, include_categories) character_intervals = _intervals(include_characters or "") exclude_intervals = _intervals(exclude_characters or "") qkey = ( catkey, min_codepoint, max_codepoint, character_intervals, exclude_intervals, ) try: return limited_category_index_cache[qkey] except KeyError: pass base = _query_for_key(catkey) result = [] for u, v in base: if v >= min_codepoint and u <= max_codepoint: result.append((max(u, min_codepoint), min(v, max_codepoint))) result = tuple(result) result = _union_intervals(result, character_intervals) result = _subtract_intervals(result, exclude_intervals) limited_category_index_cache[qkey] = result return result hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/compat.py000066400000000000000000000360011354103617500317030ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER # pylint: skip-file from __future__ import absolute_import, division, print_function import array import codecs import importlib import inspect import math import platform import re import sys import time from base64 import b64encode from collections import namedtuple try: from collections import OrderedDict, Counter except ImportError: from ordereddict import OrderedDict # type: ignore from counter import Counter # type: ignore try: from collections import abc except ImportError: import collections as abc # type: ignore if False: from typing import Type, Tuple # noqa PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PYPY = platform.python_implementation() == "PyPy" CAN_UNPACK_BYTE_ARRAY = sys.version_info[:3] >= (2, 7, 4) CAN_PACK_HALF_FLOAT = sys.version_info[:2] >= (3, 6) WINDOWS = platform.system() == "Windows" if sys.version_info[:2] <= (2, 6): raise ImportError("Hypothesis is not supported on Python versions before 2.7") def bit_length(n): return n.bit_length() def quiet_raise(exc): # Overridden by Py3 version, iff `raise XXX from None` is valid raise exc if PY3: def str_to_bytes(s): return s.encode(a_good_encoding()) def int_to_text(i): return str(i) text_type = str binary_type = bytes hrange = range ARG_NAME_ATTRIBUTE = "arg" integer_types = (int,) hunichr = chr def unicode_safe_repr(x): return repr(x) def isidentifier(s): return s.isidentifier() def escape_unicode_characters(s): return codecs.encode(s, "unicode_escape").decode("ascii") def print_unicode(x): print(x) exec( """ def quiet_raise(exc): raise exc from None """ ) def int_from_bytes(data): return int.from_bytes(data, "big") def int_to_bytes(i, size): return i.to_bytes(size, "big") def to_bytes_sequence(ls): return bytes(ls) def int_to_byte(i): return bytes([i]) import struct struct_pack = struct.pack struct_unpack = struct.unpack def benchmark_time(): return time.monotonic() else: import struct def struct_pack(*args): return hbytes(struct.pack(*args)) if 
CAN_UNPACK_BYTE_ARRAY: def struct_unpack(fmt, string): return struct.unpack(fmt, string) else: def struct_unpack(fmt, string): return struct.unpack(fmt, str(string)) def int_from_bytes(data): if CAN_UNPACK_BYTE_ARRAY: unpackable_data = data elif isinstance(data, bytearray): unpackable_data = bytes(data) else: unpackable_data = data assert isinstance(data, (bytes, bytearray)) result = 0 i = 0 while i + 4 <= len(data): result <<= 32 result |= struct.unpack(">I", unpackable_data[i : i + 4])[0] i += 4 while i < len(data): result <<= 8 result |= data[i] i += 1 return int(result) def int_to_bytes(i, size): assert i >= 0 result = bytearray(size) j = size - 1 arg = i while i and j >= 0: result[j] = i & 255 i >>= 8 j -= 1 if i: raise OverflowError("i=%r cannot be represented in %r bytes" % (arg, size)) return hbytes(result) int_to_byte = chr def to_bytes_sequence(ls): return bytearray(ls) def str_to_bytes(s): return s def int_to_text(i): return str(i).decode("ascii") VALID_PYTHON_IDENTIFIER = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$") def isidentifier(s): return VALID_PYTHON_IDENTIFIER.match(s) def unicode_safe_repr(x): r = repr(x) assert isinstance(r, str) return r.decode(a_good_encoding()) text_type = unicode binary_type = str def hrange(start_or_finish, finish=None, step=None): try: if step is None: if finish is None: return xrange(start_or_finish) else: return xrange(start_or_finish, finish) else: return xrange(start_or_finish, finish, step) except OverflowError: if step == 0: raise ValueError(u"step argument may not be zero") if step is None: step = 1 if finish is not None: start = start_or_finish else: start = 0 finish = start_or_finish assert step != 0 if step > 0: def shimrange(): i = start while i < finish: yield i i += step else: def shimrange(): i = start while i > finish: yield i i += step return shimrange() ARG_NAME_ATTRIBUTE = "id" integer_types = (int, long) hunichr = unichr def escape_unicode_characters(s): return codecs.encode(s, "string_escape") def 
print_unicode(x): if isinstance(x, unicode): x = x.encode(a_good_encoding()) print(x) def benchmark_time(): return time.time() # coverage mixes unicode and str filepaths on Python 2, which causes us # problems if we're running under unicodenazi (it might also cause problems # when not running under unicodenazi, but hard to say for sure). This method # exists to work around that: If we're given a unicode filepath, we turn it # into a string file path using the appropriate encoding. See # https://bitbucket.org/ned/coveragepy/issues/602/ for more information. if PY2: def encoded_filepath(filepath): if isinstance(filepath, text_type): return filepath.encode(sys.getfilesystemencoding()) else: return filepath else: def encoded_filepath(filepath): return filepath def a_good_encoding(): return "utf-8" def to_unicode(x): if isinstance(x, text_type): return x else: return x.decode(a_good_encoding()) def qualname(f): try: return f.__qualname__ except AttributeError: pass try: return f.im_class.__name__ + "." + f.__name__ except AttributeError: return f.__name__ try: import typing except ImportError: typing_root_type = () # type: Tuple[type, ...] ForwardRef = None else: try: # These types are new in Python 3.7, but also (partially) backported to the # typing backport on PyPI. Use if possible; or fall back to older names. typing_root_type = (typing._Final, typing._GenericAlias) # type: ignore ForwardRef = typing.ForwardRef # type: ignore except AttributeError: typing_root_type = (typing.TypingMeta, typing.TypeVar) # type: ignore try: typing_root_type += (typing._Union,) # type: ignore except AttributeError: # Under Python 3.5.0, we'll just give up... if users want strategies # inferred from Union-typed attrs attributes they can try a newer Python. 
pass ForwardRef = typing._ForwardRef # type: ignore if PY2: FullArgSpec = namedtuple( "FullArgSpec", "args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations", ) def getfullargspec(func): args, varargs, varkw, defaults = inspect.getargspec(func) return FullArgSpec( args, varargs, varkw, defaults, [], None, getattr(func, "__annotations__", {}), ) else: from inspect import getfullargspec, FullArgSpec if sys.version_info[:2] < (3, 6): def get_type_hints(thing): try: spec = getfullargspec(thing) return { k: v for k, v in spec.annotations.items() if k in (spec.args + spec.kwonlyargs) and isinstance(v, type) } except TypeError: return {} else: import typing def get_type_hints(thing): try: return typing.get_type_hints(thing) except TypeError: return {} importlib_invalidate_caches = getattr(importlib, "invalidate_caches", lambda: ()) if PY2: CODE_FIELD_ORDER = [ "co_argcount", "co_nlocals", "co_stacksize", "co_flags", "co_code", "co_consts", "co_names", "co_varnames", "co_filename", "co_name", "co_firstlineno", "co_lnotab", "co_freevars", "co_cellvars", ] else: # This field order is accurate for 3.5 - 3.7, but not 3.8 when a new field # was added for positional-only arguments. However it also added a .replace() # method that we use instead of field indices, so they're fine as-is. CODE_FIELD_ORDER = [ "co_argcount", "co_kwonlyargcount", "co_nlocals", "co_stacksize", "co_flags", "co_code", "co_consts", "co_names", "co_varnames", "co_filename", "co_name", "co_firstlineno", "co_lnotab", "co_freevars", "co_cellvars", ] def update_code_location(code, newfile, newlineno): """Take a code object and lie shamelessly about where it comes from. Why do we want to do this? It's for really shallow reasons involving hiding the hypothesis_temporary_module code from test runners like pytest's verbose mode. 
This is a vastly disproportionate terrible hack that I've done purely for vanity, and if you're reading this code you're probably here because it's broken something and now you're angry at me. Sorry. """ if hasattr(code, "replace"): # Python 3.8 added positional-only params (PEP 570), and thus changed # the layout of code objects. In beta1, the `.replace()` method was # added to facilitate future-proof code. See BPO-37032 for details. return code.replace(co_filename=newfile, co_firstlineno=newlineno) unpacked = [getattr(code, name) for name in CODE_FIELD_ORDER] unpacked[CODE_FIELD_ORDER.index("co_filename")] = newfile unpacked[CODE_FIELD_ORDER.index("co_firstlineno")] = newlineno return type(code)(*unpacked) class compatbytes(bytearray): __name__ = "bytes" def __init__(self, *args, **kwargs): bytearray.__init__(self, *args, **kwargs) self.__hash = None def __str__(self): return bytearray.__str__(self) def __repr__(self): return "compatbytes(b%r)" % (str(self),) def __hash__(self): if self.__hash is None: self.__hash = hash(str(self)) return self.__hash def count(self, value): c = 0 for w in self: if w == value: c += 1 return c def index(self, value): for i, v in enumerate(self): if v == value: return i raise ValueError("Value %r not in sequence %r" % (value, self)) def __add__(self, value): assert isinstance(value, compatbytes) return compatbytes(bytearray.__add__(self, value)) def __radd__(self, value): assert isinstance(value, compatbytes) return compatbytes(bytearray.__add__(value, self)) def __mul__(self, value): return compatbytes(bytearray.__mul__(self, value)) def __rmul__(self, value): return compatbytes(bytearray.__rmul__(self, value)) def __getitem__(self, *args, **kwargs): r = bytearray.__getitem__(self, *args, **kwargs) if isinstance(r, bytearray): return compatbytes(r) else: return r __setitem__ = None # type: ignore def join(self, parts): result = bytearray() first = True for p in parts: if not first: result.extend(self) first = False result.extend(p) 
return compatbytes(result) def __contains__(self, value): return any(v == value for v in self) if PY2: hbytes = compatbytes reasonable_byte_type = bytearray string_types = (str, unicode) else: hbytes = bytes reasonable_byte_type = bytes string_types = (str,) EMPTY_BYTES = hbytes(b"") if PY2: def to_str(s): if isinstance(s, unicode): return s.encode(a_good_encoding()) assert isinstance(s, str) return s else: def to_str(s): return s def cast_unicode(s, encoding=None): if isinstance(s, bytes): return s.decode(encoding or a_good_encoding(), "replace") return s def get_stream_enc(stream, default=None): return getattr(stream, "encoding", None) or default def implements_iterator(it): """Turn things with a __next__ attribute into iterators on Python 2.""" if PY2 and not hasattr(it, "next") and hasattr(it, "__next__"): it.next = it.__next__ return it # Under Python 2, math.floor and math.ceil return floats, which cannot # represent large integers - eg `float(2**53) == float(2**53 + 1)`. # We therefore implement them entirely in (long) integer operations. # We use the same trick on Python 3, because Numpy values and other # custom __floor__ or __ceil__ methods may convert via floats. # See issue #1667, Numpy issue 9068. def floor(x): y = int(x) if y != x and x < 0: return y - 1 return y def ceil(x): y = int(x) if y != x and x > 0: return y + 1 return y try: from math import gcd except ImportError: from fractions import gcd if PY2: def b64decode(s): from base64 import b64decode as base return hbytes(base(s)) else: from base64 import b64decode try: from django.test import TransactionTestCase def bad_django_TestCase(runner): if runner is None: return False if not isinstance(runner, TransactionTestCase): return False from hypothesis.extra.django._impl import HypothesisTestCase return not isinstance(runner, HypothesisTestCase) except Exception: # Can't use ImportError, because of e.g. 
Django config errors def bad_django_TestCase(runner): return False if PY2: LIST_CODES = ("q", "Q", "O") else: LIST_CODES = ("O",) def array_or_list(code, contents): if code in LIST_CODES: return list(contents) return array.array(code, contents) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/000077500000000000000000000000001354103617500322075ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/__init__.py000066400000000000000000000012751354103617500343250ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function choicetree.py000066400000000000000000000105121354103617500346130ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from collections import defaultdict from hypothesis.internal.compat import hrange class Chooser(object): """A source of nondeterminism for use in shrink passes.""" def __init__(self, tree, prefix): self.__prefix = prefix self.__tree = tree self.__node_trail = [tree.root] self.__choices = [] self.__finished = False def choose(self, values, condition=lambda x: True): """Return some element of values satisfying the condition that will not lead to an exhausted branch, or raise DeadBranch if no such element exist". """ assert not self.__finished node = self.__node_trail[-1] if node.live_child_count is None: node.live_child_count = len(values) node.n = len(values) assert node.live_child_count > 0 or len(values) == 0 depth = len(self.__choices) if depth < len(self.__prefix): i = self.__prefix[depth] if i >= len(values): i = 0 else: i = 0 count = 0 while node.live_child_count > 0: count += 1 assert count <= len(values) if not node.children[i].exhausted: v = values[i] if condition(v): self.__choices.append(i) self.__node_trail.append(node.children[i]) return v else: node.children[i] = DeadNode node.live_child_count -= 1 i = (i + 1) % len(values) raise DeadBranch() def finish(self): """Record the decisions made in the underlying tree and return a prefix that can be used for the next Chooser to be used.""" self.__finished = True assert len(self.__node_trail) == len(self.__choices) + 1 next_value = list(self.__choices) if next_value: next_value[-1] += 1 for i in hrange(len(next_value) - 1, -1, -1): if next_value[i] >= self.__node_trail[i].n: next_value[i] = 0 if i > 0: next_value[i - 1] += 1 else: break self.__node_trail[-1].live_child_count = 0 while len(self.__node_trail) > 1 and self.__node_trail[-1].exhausted: self.__node_trail.pop() assert len(self.__node_trail) == len(self.__choices) 
i = self.__choices.pop() target = self.__node_trail[-1] target.children[i] = DeadNode target.live_child_count -= 1 while len(next_value) > 0 and next_value[-1] == 0: next_value.pop() return tuple(next_value) class ChoiceTree(object): """Records sequences of choices made during shrinking so that we can track what parts of a pass has run. Used to create Chooser objects that are the main interface that a pass uses to make decisions about what to do. """ def __init__(self): self.root = TreeNode() @property def exhausted(self): return self.root.exhausted def step(self, prefix, f): assert not self.exhausted chooser = Chooser(self, prefix) try: f(chooser) except DeadBranch: pass return chooser.finish() class TreeNode(object): def __init__(self): self.children = defaultdict(TreeNode) self.live_child_count = None self.n = None @property def exhausted(self): return self.live_child_count == 0 DeadNode = TreeNode() DeadNode.live_child_count = 0 class DeadBranch(Exception): pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/data.py000066400000000000000000001026511354103617500334770ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from collections import defaultdict from enum import IntEnum import attr from hypothesis.errors import Frozen, InvalidArgument, StopTest from hypothesis.internal.compat import ( benchmark_time, bit_length, hbytes, hrange, int_from_bytes, int_to_bytes, text_type, unicode_safe_repr, ) from hypothesis.internal.conjecture.junkdrawer import IntList from hypothesis.internal.conjecture.utils import calc_label_from_name from hypothesis.internal.escalation import mark_for_escalation TOP_LABEL = calc_label_from_name("top") DRAW_BYTES_LABEL = calc_label_from_name("draw_bytes() in ConjectureData") class ExtraInformation(object): """A class for holding shared state on a ``ConjectureData`` that should be added to the final ``ConjectureResult``.""" def __repr__(self): return "ExtraInformation(%s)" % ( ", ".join(["%s=%r" % (k, v) for k, v in self.__dict__.items()]), ) def has_information(self): return bool(self.__dict__) class Status(IntEnum): OVERRUN = 0 INVALID = 1 VALID = 2 INTERESTING = 3 def __repr__(self): return "Status.%s" % (self.name,) class Example(object): """Examples track the hierarchical structure of draws from the byte stream, within a single test run. Examples are created to mark regions of the byte stream that might be useful to the shrinker, such as: - The bytes used by a single draw from a strategy. - Useful groupings within a strategy, such as individual list elements. - Strategy-like helper functions that aren't first-class strategies. - Each lowest-level draw of bits or bytes from the byte stream. - A single top-level example that spans the entire input. Example-tracking allows the shrinker to try "high-level" transformations, such as rearranging or deleting the elements of a list, without having to understand their exact representation in the byte stream. Rather than store each ``Example`` as a rich object, it is actually just an index into the ``Examples`` class defined below. 
This has two purposes: Firstly, for most properties of examples we will never need to allocate storage at all, because most properties are not used on most examples. Secondly, by storing the properties as compact lists of integers, we save a considerable amount of space compared to Python's normal object size. This does have the downside that it increases the amount of allocation we do, and slows things down as a result, in some usage patterns because we repeatedly allocate the same Example or int objects, but it will often dramatically reduce our memory usage, so is worth it. """ __slots__ = ("owner", "index") def __init__(self, owner, index): self.owner = owner self.index = index def __eq__(self, other): if self is other: return True if not isinstance(other, Example): return NotImplemented return (self.owner is other.owner) and (self.index == other.index) def __ne__(self, other): if self is other: return False if not isinstance(other, Example): return NotImplemented return (self.owner is not other.owner) or (self.index != other.index) def __repr__(self): return "examples[%d]" % (self.index,) @property def label(self): """A label is an opaque value that associates each example with its approximate origin, such as a particular strategy class or a particular kind of draw.""" return self.owner.labels[self.owner.label_indices[self.index]] @property def parent(self): """The index of the example that this one is nested directly within.""" if self.index == 0: return None return self.owner.parentage[self.index] @property def start(self): """The position of the start of this example in the byte stream.""" return self.owner.starts[self.index] @property def end(self): """The position directly after the last byte in this byte stream. i.e. the example corresponds to the half open region [start, end). """ return self.owner.ends[self.index] @property def depth(self): """Depth of this example in the example tree. 
The top-level example has a depth of 0.""" return self.owner.depths[self.index] @property def trivial(self): """An example is "trivial" if it only contains forced bytes and zero bytes. All examples start out as trivial, and then get marked non-trivial when we see a byte that is neither forced nor zero.""" return self.index in self.owner.trivial @property def discarded(self): """True if this is example's ``stop_example`` call had ``discard`` set to ``True``. This means we believe that the shrinker should be able to delete this example completely, without affecting the value produced by its enclosing strategy. Typically set when a rejection sampler decides to reject a generated value and try again.""" return self.index in self.owner.discarded @property def length(self): """The number of bytes in this example.""" return self.end - self.start @property def children(self): """The list of all examples with this as a parent, in increasing index order.""" return [self.owner[i] for i in self.owner.children[self.index]] class ExampleProperty(object): """There are many properties of examples that we calculate by essentially rerunning thet test case multiple times based on the calls which we record in ExampleRecord. This class defines a visitor, subclasses of which can be used to calculate these properties. 
""" def __init__(self, examples): self.example_stack = [] self.examples = examples self.bytes_read = 0 self.example_count = 0 self.block_count = 0 def run(self): """Rerun the test case with this visitor and return the results of ``self.finish()``.""" self.begin() blocks = self.examples.blocks for record in self.examples.trail: if record == DRAW_BITS_RECORD: self.__push(0) self.bytes_read = blocks.endpoints[self.block_count] self.block(self.block_count) self.block_count += 1 self.__pop(False) elif record >= START_EXAMPLE_RECORD: self.__push(record - START_EXAMPLE_RECORD) else: assert record in ( STOP_EXAMPLE_DISCARD_RECORD, STOP_EXAMPLE_NO_DISCARD_RECORD, ) self.__pop(record == STOP_EXAMPLE_DISCARD_RECORD) return self.finish() def __push(self, label_index): i = self.example_count assert i < len(self.examples) self.start_example(i, label_index) self.example_count += 1 self.example_stack.append(i) def __pop(self, discarded): i = self.example_stack.pop() self.stop_example(i, discarded) def begin(self): """Called at the beginning of the run to initialise any relevant state.""" self.result = IntList.of_length(len(self.examples)) def start_example(self, i, label_index): """Called at the start of each example, with ``i`` the index of the example and ``label_index`` the index of its label in ``self.examples.labels``.""" def block(self, i): """Called with each ``draw_bits`` call, with ``i`` the index of the corresonding block in ``self.examples.blocks``""" def stop_example(self, i, discarded): """Called at the end of each example, with ``i`` the index of the example and ``discarded`` being ``True`` if ``stop_example`` was called with ``discard=True``.""" def finish(self): return self.result def calculated_example_property(cls): """Given an ``ExampleProperty`` as above we use this decorator to transform it into a lazy property on the ``Examples`` class, which has as its value the result of calling ``cls.run()``, computed the first time the property is accessed. 
This has the slightly weird result that we are defining nested classes which get turned into properties.""" name = cls.__name__ cache_name = "__" + name def lazy_calculate(self): result = getattr(self, cache_name, None) if result is None: result = cls(self).run() setattr(self, cache_name, result) return result lazy_calculate.__name__ = cls.__name__ lazy_calculate.__qualname__ = getattr(cls, "__qualname__", cls.__name__) return property(lazy_calculate) DRAW_BITS_RECORD = 0 STOP_EXAMPLE_DISCARD_RECORD = 1 STOP_EXAMPLE_NO_DISCARD_RECORD = 2 START_EXAMPLE_RECORD = 3 class ExampleRecord(object): """Records the series of ``start_example``, ``stop_example``, and ``draw_bits`` calls so that these may be stored in ``Examples`` and replayed when we need to know about the structure of individual ``Example`` objects. Note that there is significant similarity between this class and ``DataObserver``, and the plan is to eventually unify them, but they currently have slightly different functions and implementations. """ def __init__(self): self.labels = [DRAW_BYTES_LABEL] self.__index_of_labels = {DRAW_BYTES_LABEL: 0} self.trail = IntList() def freeze(self): self.__index_of_labels = None def start_example(self, label): try: i = self.__index_of_labels[label] except KeyError: i = self.__index_of_labels.setdefault(label, len(self.labels)) self.labels.append(label) self.trail.append(START_EXAMPLE_RECORD + i) def stop_example(self, discard): if discard: self.trail.append(STOP_EXAMPLE_DISCARD_RECORD) else: self.trail.append(STOP_EXAMPLE_NO_DISCARD_RECORD) def draw_bits(self, n, forced): self.trail.append(DRAW_BITS_RECORD) class Examples(object): """A lazy collection of ``Example`` objects, derived from the record of recorded behaviour in ``ExampleRecord``. Behaves logically as if it were a list of ``Example`` objects, but actually mostly exists as a compact store of information for them to reference into. 
All properties on here are best understood as the backing storage for ``Example`` and are described there. """ def __init__(self, record, blocks): self.trail = record.trail self.labels = record.labels self.__length = ( self.trail.count(STOP_EXAMPLE_DISCARD_RECORD) + record.trail.count(STOP_EXAMPLE_NO_DISCARD_RECORD) + record.trail.count(DRAW_BITS_RECORD) ) self.__example_lengths = None self.blocks = blocks self.__children = None @calculated_example_property class starts_and_ends(ExampleProperty): def begin(self): self.starts = IntList.of_length(len(self.examples)) self.ends = IntList.of_length(len(self.examples)) def start_example(self, i, label_index): self.starts[i] = self.bytes_read def stop_example(self, i, label_index): self.ends[i] = self.bytes_read def finish(self): return (self.starts, self.ends) @property def starts(self): return self.starts_and_ends[0] @property def ends(self): return self.starts_and_ends[1] @calculated_example_property class discarded(ExampleProperty): def begin(self): self.result = set() def finish(self): return frozenset(self.result) def stop_example(self, i, discarded): if discarded: self.result.add(i) @calculated_example_property class trivial(ExampleProperty): def begin(self): self.nontrivial = IntList.of_length(len(self.examples)) self.result = set() def block(self, i): if not self.examples.blocks.trivial(i): self.nontrivial[self.example_stack[-1]] = 1 def stop_example(self, i, discarded): if self.nontrivial[i]: if self.example_stack: self.nontrivial[self.example_stack[-1]] = 1 else: self.result.add(i) def finish(self): return frozenset(self.result) @calculated_example_property class parentage(ExampleProperty): def stop_example(self, i, discarded): if i > 0: self.result[i] = self.example_stack[-1] @calculated_example_property class depths(ExampleProperty): def begin(self): self.result = IntList.of_length(len(self.examples)) def start_example(self, i, label_index): self.result[i] = len(self.example_stack) 
@attr.s(slots=True, frozen=True)
class Block(object):
    """Blocks track the flat list of lowest-level draws from the byte
    stream, within a single test run. Block-tracking allows the shrinker
    to try "low-level" transformations, such as minimizing the numeric
    value of an individual call to ``draw_bits``.
    """

    # Half-open byte range [start, end) that this block occupies in the
    # run's buffer.
    start = attr.ib()
    end = attr.ib()

    # Index of this block inside the overall list of blocks.
    index = attr.ib()

    # True if this block's byte values were forced by a write operation.
    # As long as the bytes before this block remain the same, modifying this
    # block's bytes will have no effect.
    forced = attr.ib(repr=False)

    # True if this block's byte values are all 0. Reading this flag can be
    # more convenient than explicitly checking a slice for non-zero bytes.
    all_zero = attr.ib(repr=False)

    @property
    def bounds(self):
        # (start, end) as a tuple, for convenience.
        return (self.start, self.end)

    @property
    def length(self):
        # Number of bytes covered by this block.
        return self.end - self.start

    @property
    def trivial(self):
        # A trivial block cannot usefully be shrunk further: either its
        # bytes were forced, or they are already all zero.
        return self.forced or self.all_zero
Pretends to be a list containing ``Block`` objects but actually only contains their endpoints right up until the point where you want to access the actual block, at which point it is constructed. This is designed to be as space efficient as possible, so will at various points silently transform its representation into one that is better suited for the current access pattern. In addition, it has a number of convenience methods for accessing properties of the block object at index ``i`` that should generally be preferred to using the Block objects directly, as it will not have to allocate the actual object.""" __slots__ = ("endpoints", "owner", "__blocks", "__count", "__sparse") def __init__(self, owner): self.owner = owner self.endpoints = IntList() self.__blocks = {} self.__count = 0 self.__sparse = True def add_endpoint(self, n): """Add n to the list of endpoints.""" assert isinstance(self.owner, ConjectureData) self.endpoints.append(n) def transfer_ownership(self, new_owner): """Used to move ``Blocks`` over to a ``ConjectureResult`` object when that is read to be used and we no longer want to keep the whole ``ConjectureData`` around.""" assert isinstance(new_owner, ConjectureResult) self.owner = new_owner self.__check_completion() def start(self, i): """Equivalent to self[i].start.""" i = self._check_index(i) if i == 0: return 0 else: return self.end(i - 1) def end(self, i): """Equivalent to self[i].end.""" return self.endpoints[i] def bounds(self, i): """Equivalent to self[i].bounds.""" return (self.start(i), self.end(i)) def all_bounds(self): """Equivalent to [(b.start, b.end) for b in self].""" prev = 0 for e in self.endpoints: yield (prev, e) prev = e @property def last_block_length(self): return self.end(-1) - self.start(-1) def __len__(self): return len(self.endpoints) def __known_block(self, i): try: return self.__blocks[i] except (KeyError, IndexError): return None def trivial(self, i): """Equivalent to self.blocks[i].trivial.""" if self.owner is not 
None: return self.start(i) in self.owner.forced_indices or not any( self.owner.buffer[self.start(i) : self.end(i)] ) else: return self[i].trivial def _check_index(self, i): n = len(self) if i < -n or i >= n: raise IndexError("Index %d out of range [-%d, %d)" % (i, n, n)) if i < 0: i += n return i def __getitem__(self, i): i = self._check_index(i) assert i >= 0 result = self.__known_block(i) if result is not None: return result # We store the blocks as a sparse dict mapping indices to the # actual result, but this isn't the best representation once we # stop being sparse and want to use most of the blocks. Switch # over to a list at that point. if self.__sparse and len(self.__blocks) * 2 >= len(self): new_blocks = [None] * len(self) for k, v in self.__blocks.items(): new_blocks[k] = v self.__sparse = False self.__blocks = new_blocks assert self.__blocks[i] is None start = self.start(i) end = self.end(i) # We keep track of the number of blocks that have actually been # instantiated so that when every block that could be instantiated # has been we know that the list is complete and can throw away # some data that we no longer need. self.__count += 1 # Integrity check: We can't have allocated more blocks than we have # positions for blocks. assert self.__count <= len(self) result = Block( start=start, end=end, index=i, forced=start in self.owner.forced_indices, all_zero=not any(self.owner.buffer[start:end]), ) try: self.__blocks[i] = result except IndexError: assert isinstance(self.__blocks, list) assert len(self.__blocks) < len(self) self.__blocks.extend([None] * (len(self) - len(self.__blocks))) self.__blocks[i] = result self.__check_completion() return result def __check_completion(self): """The list of blocks is complete if we have created every ``Block`` object that we currently good and know that no more will be created. If this happens then we don't need to keep the reference to the owner around, and delete it so that there is no circular reference. 
The main benefit of this is that the gc doesn't need to run to collect this because normal reference counting is enough. """ if self.__count == len(self) and isinstance(self.owner, ConjectureResult): self.owner = None def __iter__(self): for i in hrange(len(self)): yield self[i] def __repr__(self): parts = [] for i in hrange(len(self)): b = self.__known_block(i) if b is None: parts.append("...") else: parts.append(repr(b)) return "Block([%s])" % (", ".join(parts),) class _Overrun(object): status = Status.OVERRUN def __repr__(self): return "Overrun" Overrun = _Overrun() global_test_counter = 0 MAX_DEPTH = 100 class DataObserver(object): """Observer class for recording the behaviour of a ConjectureData object, primarily used for tracking the behaviour in the tree cache.""" def conclude_test(self, status, interesting_origin): """Called when ``conclude_test`` is called on the observed ``ConjectureData``, with the same arguments. Note that this is called after ``freeze`` has completed. """ def draw_bits(self, n_bits, forced, value): """Called when ``draw_bits`` is called on on the observed ``ConjectureData``. * ``n_bits`` is the number of bits drawn. * ``forced`` is True if the corresponding draw was forced or ``False`` otherwise. * ``value`` is the result that ``draw_bits`` returned. """ @attr.s(slots=True) class ConjectureResult(object): """Result class storing the parts of ConjectureData that we will care about after the original ConjectureData has outlived its usefulness.""" status = attr.ib() interesting_origin = attr.ib() buffer = attr.ib() blocks = attr.ib() output = attr.ib() extra_information = attr.ib() has_discards = attr.ib() forced_indices = attr.ib(repr=False) examples = attr.ib(repr=False) index = attr.ib(init=False) def __attrs_post_init__(self): self.index = len(self.buffer) self.forced_indices = frozenset(self.forced_indices) # Masks for masking off the first byte of an n-bit buffer. # The appropriate mask is stored at position n % 8. 
BYTE_MASKS = [(1 << n) - 1 for n in hrange(8)] BYTE_MASKS[0] = 255 class ConjectureData(object): @classmethod def for_buffer(self, buffer, observer=None): buffer = hbytes(buffer) return ConjectureData( max_length=len(buffer), draw_bytes=lambda data, n: hbytes(buffer[data.index : data.index + n]), observer=observer, ) def __init__(self, max_length, draw_bytes, observer=None): if observer is None: observer = DataObserver() assert isinstance(observer, DataObserver) self.observer = observer self.max_length = max_length self.is_find = False self._draw_bytes = draw_bytes self.overdraw = 0 self.__block_starts = defaultdict(list) self.__block_starts_calculated_to = 0 self.blocks = Blocks(self) self.buffer = bytearray() self.index = 0 self.output = u"" self.status = Status.VALID self.frozen = False global global_test_counter self.testcounter = global_test_counter global_test_counter += 1 self.start_time = benchmark_time() self.events = set() self.forced_indices = set() self.interesting_origin = None self.draw_times = [] self.max_depth = 0 self.has_discards = False self.consecutive_discard_counts = [] self.__result = None # Normally unpopulated but we need this in the niche case # that self.as_result() is Overrun but we still want the # examples for reporting purposes. self.__examples = None # We want the top level example to have depth 0, so we start # at -1. 
self.depth = -1 self.__example_record = ExampleRecord() self.extra_information = ExtraInformation() self.start_example(TOP_LABEL) def __repr__(self): return "ConjectureData(%s, %d bytes%s)" % ( self.status.name, len(self.buffer), ", frozen" if self.frozen else "", ) def as_result(self): """Convert the result of running this test into either an Overrun object or a ConjectureResult.""" assert self.frozen if self.status == Status.OVERRUN: return Overrun if self.__result is None: self.__result = ConjectureResult( status=self.status, interesting_origin=self.interesting_origin, buffer=self.buffer, examples=self.examples, blocks=self.blocks, output=self.output, extra_information=self.extra_information if self.extra_information.has_information() else None, has_discards=self.has_discards, forced_indices=self.forced_indices, ) self.blocks.transfer_ownership(self.__result) return self.__result def __assert_not_frozen(self, name): if self.frozen: raise Frozen("Cannot call %s on frozen ConjectureData" % (name,)) def note(self, value): self.__assert_not_frozen("note") if not isinstance(value, text_type): value = unicode_safe_repr(value) self.output += value def draw(self, strategy, label=None): if self.is_find and not strategy.supports_find: raise InvalidArgument( ( "Cannot use strategy %r within a call to find (presumably " "because it would be invalid after the call had ended)." 
) % (strategy,) ) if strategy.is_empty: self.mark_invalid() if self.depth >= MAX_DEPTH: self.mark_invalid() return self.__draw(strategy, label=label) def __draw(self, strategy, label): at_top_level = self.depth == 0 if label is None: label = strategy.label self.start_example(label=label) try: if not at_top_level: return strategy.do_draw(self) else: try: strategy.validate() start_time = benchmark_time() try: return strategy.do_draw(self) finally: self.draw_times.append(benchmark_time() - start_time) except BaseException as e: mark_for_escalation(e) raise finally: self.stop_example() def start_example(self, label): self.__assert_not_frozen("start_example") self.depth += 1 # Logically it would make sense for this to just be # ``self.depth = max(self.depth, self.max_depth)``, which is what it used to # be until we ran the code under tracemalloc and found a rather significant # chunk of allocation was happening here. This was presumably due to varargs # or the like, but we didn't investigate further given that it was easy # to fix with this check. if self.depth > self.max_depth: self.max_depth = self.depth self.__example_record.start_example(label) self.consecutive_discard_counts.append(0) def stop_example(self, discard=False): if self.frozen: return self.consecutive_discard_counts.pop() if discard: self.has_discards = True self.depth -= 1 assert self.depth >= -1 self.__example_record.stop_example(discard) if self.consecutive_discard_counts: # We block long sequences of discards. This helps us avoid performance # problems where there is rejection sampling. In particular tests which # have a very small actual state space but use rejection sampling will # play badly with generate_novel_prefix() in DataTree, and will end up # generating very long tests with long runs of the rejection sample. if discard: self.consecutive_discard_counts[-1] += 1 # 20 is a fairly arbitrary limit chosen mostly so that all of the # existing tests passed under it. 
Essentially no reasonable # generation should hit this limit when running in purely random # mode, but unreasonable generation is fairly widespread, and our # manipulation of the bitstream can make it more likely. if self.consecutive_discard_counts[-1] > 20: self.mark_invalid() else: self.consecutive_discard_counts[-1] = 0 def note_event(self, event): self.events.add(event) @property def examples(self): assert self.frozen if self.__examples is None: self.__examples = Examples(record=self.__example_record, blocks=self.blocks) return self.__examples def freeze(self): if self.frozen: assert isinstance(self.buffer, hbytes) return self.finish_time = benchmark_time() assert len(self.buffer) == self.index # Always finish by closing all remaining examples so that we have a # valid tree. while self.depth >= 0: self.stop_example() self.__example_record.freeze() self.frozen = True self.buffer = hbytes(self.buffer) self.events = frozenset(self.events) del self._draw_bytes self.observer.conclude_test(self.status, self.interesting_origin) def draw_bits(self, n, forced=None): """Return an ``n``-bit integer from the underlying source of bytes. If ``forced`` is set to an integer will instead ignore the underlying source and simulate a draw as if it had returned that integer.""" self.__assert_not_frozen("draw_bits") if n == 0: return 0 assert n > 0 n_bytes = bits_to_bytes(n) self.__check_capacity(n_bytes) if forced is not None: buf = bytearray(int_to_bytes(forced, n_bytes)) else: buf = bytearray(self._draw_bytes(self, n_bytes)) assert len(buf) == n_bytes # If we have a number of bits that is not a multiple of 8 # we have to mask off the high bits. 
def bits_to_bytes(n):
    """Return the number of bytes needed to hold an ``n``-bit value.

    Equivalent to ``(n + 7) // 8``, but written as a shift because this
    helper is called often enough for the difference to matter.
    """
    return (n + 7) >> 3
EMPTY = frozenset()


@attr.s(slots=True)
class Branch(object):
    """A point in the tree where more than one value has been observed for
    the same ``draw_bits`` call, keyed by the value drawn."""

    bit_length = attr.ib()
    children = attr.ib()

    @property
    def max_children(self):
        # An n-bit draw can take exactly 2 ** n distinct values.
        return 1 << self.bit_length


@attr.s(slots=True, frozen=True)
class Conclusion(object):
    """A terminal transition: ``conclude_test`` was called here with this
    status and interesting origin."""

    status = attr.ib()
    interesting_origin = attr.ib()


# Intern table so that equal conclusions share a single object.
CONCLUSIONS = {}


def conclusion(status, interesting_origin):
    """Return the canonical ``Conclusion`` for this (status, origin) pair,
    creating and caching it on first use."""
    fresh = Conclusion(status, interesting_origin)
    return CONCLUSIONS.setdefault(fresh, fresh)
# * A ``Conclusion`` object indicating that ``conclude_test`` # was called here. transition = attr.ib(default=None) # A tree node is exhausted if every possible sequence of # draws below it has been explored. We store this information # on a field and update it when performing operations that # could change the answer. # # A node may start exhausted, e.g. because it it leads # immediately to a conclusion, but can only go from # non-exhausted to exhausted when one of its children # becomes exhausted or it is marked as a conclusion. # # Therefore we only need to check whether we need to update # this field when the node is first created in ``split_at`` # or when we have walked a path through this node to a # conclusion in ``TreeRecordingObserver``. is_exhausted = attr.ib(default=False, init=False) @property def forced(self): if not self.__forced: return EMPTY return self.__forced def mark_forced(self, i): """Note that the value at index ``i`` was forced.""" assert 0 <= i < len(self.values) if self.__forced is None: self.__forced = set() self.__forced.add(i) def split_at(self, i): """Splits the tree so that it can incorporate a decision at the ``draw_bits`` call corresponding to position ``i``, or raises ``Flaky`` if that was meant to be a forced node.""" if i in self.forced: inconsistent_generation() assert not self.is_exhausted key = self.values[i] child = TreeNode( bit_lengths=self.bit_lengths[i + 1 :], values=self.values[i + 1 :], transition=self.transition, ) self.transition = Branch(bit_length=self.bit_lengths[i], children={key: child}) if self.__forced is not None: child.__forced = {j - i - 1 for j in self.__forced if j > i} self.__forced = {j for j in self.__forced if j < i} child.check_exhausted() del self.values[i:] del self.bit_lengths[i:] assert len(self.values) == len(self.bit_lengths) == i def check_exhausted(self): """Recalculates ``self.is_exhausted`` if necessary then returns it.""" if ( not self.is_exhausted and len(self.forced) == len(self.values) 
and self.transition is not None ): if isinstance(self.transition, Conclusion): self.is_exhausted = True elif len(self.transition.children) == self.transition.max_children: self.is_exhausted = all( v.is_exhausted for v in self.transition.children.values() ) return self.is_exhausted class DataTree(object): """Tracks the tree structure of a collection of ConjectureData objects, for use in ConjectureRunner.""" def __init__(self): self.root = TreeNode() @property def is_exhausted(self): """Returns True if every possible node is dead and thus the language described must have been fully explored.""" return self.root.is_exhausted def generate_novel_prefix(self, random): """Generate a short random string that (after rewriting) is not a prefix of any buffer previously added to the tree. The resulting prefix is essentially arbitrary - it would be nice for it to be uniform at random, but previous attempts to do that have proven too expensive. """ assert not self.is_exhausted novel_prefix = bytearray() def append_int(n_bits, value): novel_prefix.extend(int_to_bytes(value, bits_to_bytes(n_bits))) current_node = self.root while True: assert not current_node.is_exhausted for i, (n_bits, value) in enumerate( zip(current_node.bit_lengths, current_node.values) ): if i in current_node.forced: append_int(n_bits, value) else: while True: k = random.getrandbits(n_bits) if k != value: append_int(n_bits, k) break # We've now found a value that is allowed to # vary, so what follows is not fixed. 
return hbytes(novel_prefix) else: assert not isinstance(current_node.transition, Conclusion) if current_node.transition is None: return hbytes(novel_prefix) branch = current_node.transition assert isinstance(branch, Branch) n_bits = branch.bit_length while True: k = random.getrandbits(n_bits) try: child = branch.children[k] except KeyError: append_int(n_bits, k) return hbytes(novel_prefix) if not child.is_exhausted: append_int(n_bits, k) current_node = child break def rewrite(self, buffer): """Use previously seen ConjectureData objects to return a tuple of the rewritten buffer and the status we would get from running that buffer with the test function. If the status cannot be predicted from the existing values it will be None.""" buffer = hbytes(buffer) data = ConjectureData.for_buffer(buffer) try: self.simulate_test_function(data) return (data.buffer, data.status) except PreviouslyUnseenBehaviour: return (buffer, None) def simulate_test_function(self, data): """Run a simulated version of the test function recorded by this tree. Note that this does not currently call ``stop_example`` or ``start_example`` as these are not currently recorded in the tree. 
This will likely change in future.""" node = self.root try: while True: for i, (n_bits, previous) in enumerate( zip(node.bit_lengths, node.values) ): v = data.draw_bits( n_bits, forced=node.values[i] if i in node.forced else None ) if v != previous: raise PreviouslyUnseenBehaviour() if isinstance(node.transition, Conclusion): t = node.transition data.conclude_test(t.status, t.interesting_origin) elif node.transition is None: raise PreviouslyUnseenBehaviour() else: v = data.draw_bits(node.transition.bit_length) try: node = node.transition.children[v] except KeyError: raise PreviouslyUnseenBehaviour() except StopTest: pass def new_observer(self): return TreeRecordingObserver(self) class TreeRecordingObserver(DataObserver): def __init__(self, tree): self.__current_node = tree.root self.__index_in_current_node = 0 self.__trail = [self.__current_node] def draw_bits(self, n_bits, forced, value): i = self.__index_in_current_node self.__index_in_current_node += 1 node = self.__current_node assert len(node.bit_lengths) == len(node.values) if i < len(node.bit_lengths): if n_bits != node.bit_lengths[i]: inconsistent_generation() # Note that we don't check whether a previously # forced value is now free. That will be caught # if we ever split the node there, but otherwise # may pass silently. This is acceptable because it # means we skip a hash set lookup on every # draw and that's a pretty niche failure mode. 
if forced and i not in node.forced: inconsistent_generation() if value != node.values[i]: node.split_at(i) assert i == len(node.values) new_node = TreeNode() branch = node.transition branch.children[value] = new_node self.__current_node = new_node self.__index_in_current_node = 0 else: trans = node.transition if trans is None: node.bit_lengths.append(n_bits) node.values.append(value) if forced: node.mark_forced(i) elif isinstance(trans, Conclusion): assert trans.status != Status.OVERRUN # We tried to draw where history says we should have # stopped inconsistent_generation() else: assert isinstance(trans, Branch) if n_bits != trans.bit_length: inconsistent_generation() try: self.__current_node = trans.children[value] except KeyError: self.__current_node = trans.children.setdefault(value, TreeNode()) self.__index_in_current_node = 0 if self.__trail[-1] is not self.__current_node: self.__trail.append(self.__current_node) def conclude_test(self, status, interesting_origin): """Says that ``status`` occurred at node ``node``. This updates the node if necessary and checks for consistency.""" if status == Status.OVERRUN: return i = self.__index_in_current_node node = self.__current_node if i < len(node.values) or isinstance(node.transition, Branch): inconsistent_generation() new_transition = conclusion(status, interesting_origin) if node.transition is not None and node.transition != new_transition: # As an, I'm afraid, horrible bodge, we deliberately ignore flakiness # where tests go from interesting to valid, because it's much easier # to produce good error messages for these further up the stack. if ( node.transition.status != Status.INTERESTING or new_transition.status != Status.VALID ): raise Flaky( "Inconsistent test results! 
Test case was %r on first run but %r on second" % (node.transition, new_transition) ) else: node.transition = new_transition assert node is self.__trail[-1] node.check_exhausted() assert len(node.values) > 0 or node.check_exhausted() for t in reversed(self.__trail): # Any node we've traversed might have now become exhausted. # We check from the right. As soon as we hit a node that # isn't exhausted, this automatically implies that all of # its parents are not exhausted, so we stop. if not t.check_exhausted(): break hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/engine.py000066400000000000000000001126071354103617500340350ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from enum import Enum from random import Random, getrandbits from weakref import WeakKeyDictionary import attr from hypothesis import HealthCheck, Phase, Verbosity, settings as Settings from hypothesis._settings import local_settings from hypothesis.internal.cache import LRUReusedCache from hypothesis.internal.compat import ( Counter, ceil, hbytes, hrange, int_from_bytes, to_bytes_sequence, ) from hypothesis.internal.conjecture.data import ( MAX_DEPTH, ConjectureData, ConjectureResult, Overrun, Status, StopTest, ) from hypothesis.internal.conjecture.datatree import DataTree, TreeRecordingObserver from hypothesis.internal.conjecture.junkdrawer import pop_random, uniform from hypothesis.internal.conjecture.shrinker import Shrinker, sort_key from hypothesis.internal.healthcheck import fail_health_check from hypothesis.reporting import base_report # Tell pytest to omit the body of this module from tracebacks # https://docs.pytest.org/en/latest/example/simple.html#writing-well-integrated-assertion-helpers __tracebackhide__ = True MAX_SHRINKS = 500 CACHE_SIZE = 10000 MUTATION_POOL_SIZE = 100 MIN_TEST_CALLS = 10 BUFFER_SIZE = 8 * 1024 @attr.s class HealthCheckState(object): valid_examples = attr.ib(default=0) invalid_examples = attr.ib(default=0) overrun_examples = attr.ib(default=0) draw_times = attr.ib(default=attr.Factory(list)) class ExitReason(Enum): max_examples = 0 max_iterations = 1 max_shrinks = 3 finished = 4 flaky = 5 class RunIsComplete(Exception): pass class ConjectureRunner(object): def __init__(self, test_function, settings=None, random=None, database_key=None): self._test_function = test_function self.settings = settings or Settings() self.shrinks = 0 self.call_count = 0 self.event_call_counts = Counter() self.valid_examples = 0 self.random = random or Random(getrandbits(128)) self.database_key = database_key self.status_runtimes = {} self.all_drawtimes = [] self.all_runtimes = 
[] self.events_to_strings = WeakKeyDictionary() self.target_selector = TargetSelector(self.random) self.interesting_examples = {} # We use call_count because there may be few possible valid_examples. self.first_bug_found_at = None self.last_bug_found_at = None self.shrunk_examples = set() self.health_check_state = None self.used_examples_from_database = False self.tree = DataTree() # We want to be able to get the ConjectureData object that results # from running a buffer without recalculating, especially during # shrinking where we need to know about the structure of the # executed test case. self.__data_cache = LRUReusedCache(CACHE_SIZE) def __tree_is_exhausted(self): return self.tree.is_exhausted def __stoppable_test_function(self, data): """Run ``self._test_function``, but convert a ``StopTest`` exception into a normal return. """ try: self._test_function(data) except StopTest as e: if e.testcounter == data.testcounter: # This StopTest has successfully stopped its test, and can now # be discarded. pass else: # This StopTest was raised by a different ConjectureData. We # need to re-raise it so that it will eventually reach the # correct engine. raise def test_function(self, data): assert isinstance(data.observer, TreeRecordingObserver) self.call_count += 1 try: self.__stoppable_test_function(data) except BaseException: self.save_buffer(data.buffer) raise finally: data.freeze() self.note_details(data) self.target_selector.add(data) self.debug_data(data) if data.status == Status.VALID: self.valid_examples += 1 # Record the test result in the tree, to avoid unnecessary work in # the future. # The tree has two main uses: # 1. It is mildly useful in some cases during generation where there is # a high probability of duplication but it is possible to generate # many examples. e.g. 
if we had input of the form none() | text() # then we would generate duplicates 50% of the time, and would # like to avoid that and spend more time exploring the text() half # of the search space. The tree allows us to predict in advance if # the test would lead to a duplicate and avoid that. # 2. When shrinking it is *extremely* useful to be able to anticipate # duplication, because we try many similar and smaller test cases, # and these will tend to have a very high duplication rate. This is # where the tree usage really shines. # # In aid of this, we keep around just enough of the structure of the # the tree of examples we've seen so far to let us predict whether # something will lead to a known result, and to canonicalize it into # the buffer that would belong to the ConjectureData that you get # from running it. if data.status == Status.INTERESTING: key = data.interesting_origin changed = False try: existing = self.interesting_examples[key] except KeyError: changed = True self.last_bug_found_at = self.call_count if self.first_bug_found_at is None: self.first_bug_found_at = self.call_count else: if sort_key(data.buffer) < sort_key(existing.buffer): self.shrinks += 1 self.downgrade_buffer(existing.buffer) self.__data_cache.unpin(existing.buffer) changed = True if changed: self.save_buffer(data.buffer) self.interesting_examples[key] = data.as_result() self.__data_cache.pin(data.buffer) self.shrunk_examples.discard(key) if self.shrinks >= MAX_SHRINKS: self.exit_with(ExitReason.max_shrinks) if not self.interesting_examples: # Note that this logic is reproduced to end the generation phase when # we have interesting examples. Update that too if you change this! # (The doubled implementation is because here we exit the engine entirely, # while in the other case below we just want to move on to shrinking.) 
if self.valid_examples >= self.settings.max_examples: self.exit_with(ExitReason.max_examples) if self.call_count >= max( self.settings.max_examples * 10, # We have a high-ish default max iterations, so that tests # don't become flaky when max_examples is too low. 1000, ): self.exit_with(ExitReason.max_iterations) if self.__tree_is_exhausted(): self.exit_with(ExitReason.finished) self.record_for_health_check(data) def generate_novel_prefix(self): """Uses the tree to proactively generate a starting sequence of bytes that we haven't explored yet for this test. When this method is called, we assume that there must be at least one novel prefix left to find. If there were not, then the test run should have already stopped due to tree exhaustion. """ return self.tree.generate_novel_prefix(self.random) @property def cap(self): return BUFFER_SIZE // 2 def record_for_health_check(self, data): # Once we've actually found a bug, there's no point in trying to run # health checks - they'll just mask the actually important information. if data.status == Status.INTERESTING: self.health_check_state = None state = self.health_check_state if state is None: return state.draw_times.extend(data.draw_times) if data.status == Status.VALID: state.valid_examples += 1 elif data.status == Status.INVALID: state.invalid_examples += 1 else: assert data.status == Status.OVERRUN state.overrun_examples += 1 max_valid_draws = 10 max_invalid_draws = 50 max_overrun_draws = 20 assert state.valid_examples <= max_valid_draws if state.valid_examples == max_valid_draws: self.health_check_state = None return if state.overrun_examples == max_overrun_draws: fail_health_check( self.settings, ( "Examples routinely exceeded the max allowable size. " "(%d examples overran while generating %d valid ones)" ". Generating examples this large will usually lead to" " bad results. You could try setting max_size parameters " "on your collections and turning " "max_leaves down on recursive() calls." 
) % (state.overrun_examples, state.valid_examples), HealthCheck.data_too_large, ) if state.invalid_examples == max_invalid_draws: fail_health_check( self.settings, ( "It looks like your strategy is filtering out a lot " "of data. Health check found %d filtered examples but " "only %d good ones. This will make your tests much " "slower, and also will probably distort the data " "generation quite a lot. You should adapt your " "strategy to filter less. This can also be caused by " "a low max_leaves parameter in recursive() calls" ) % (state.invalid_examples, state.valid_examples), HealthCheck.filter_too_much, ) draw_time = sum(state.draw_times) if draw_time > 1.0: fail_health_check( self.settings, ( "Data generation is extremely slow: Only produced " "%d valid examples in %.2f seconds (%d invalid ones " "and %d exceeded maximum size). Try decreasing " "size of the data you're generating (with e.g." "max_size or max_leaves parameters)." ) % ( state.valid_examples, draw_time, state.invalid_examples, state.overrun_examples, ), HealthCheck.too_slow, ) def save_buffer(self, buffer): if self.settings.database is not None: key = self.database_key if key is None: return self.settings.database.save(key, hbytes(buffer)) def downgrade_buffer(self, buffer): if self.settings.database is not None and self.database_key is not None: self.settings.database.move(self.database_key, self.secondary_key, buffer) @property def secondary_key(self): return b".".join((self.database_key, b"secondary")) @property def covering_key(self): return b".".join((self.database_key, b"coverage")) def note_details(self, data): self.__data_cache[data.buffer] = data.as_result() runtime = max(data.finish_time - data.start_time, 0.0) self.all_runtimes.append(runtime) self.all_drawtimes.extend(data.draw_times) self.status_runtimes.setdefault(data.status, []).append(runtime) for event in set(map(self.event_to_string, data.events)): self.event_call_counts[event] += 1 def debug(self, message): if 
self.settings.verbosity >= Verbosity.debug: base_report(message) @property def report_debug_info(self): return self.settings.verbosity >= Verbosity.debug def debug_data(self, data): if not self.report_debug_info: return stack = [[]] def go(ex): if ex.length == 0: return if len(ex.children) == 0: stack[-1].append(int_from_bytes(data.buffer[ex.start : ex.end])) else: node = [] stack.append(node) for v in ex.children: go(v) stack.pop() if len(node) == 1: stack[-1].extend(node) else: stack[-1].append(node) go(data.examples[0]) assert len(stack) == 1 status = repr(data.status) if data.status == Status.INTERESTING: status = "%s (%r)" % (status, data.interesting_origin) self.debug( "%d bytes %r -> %s, %s" % (data.index, stack[0], status, data.output) ) def run(self): with local_settings(self.settings): try: self._run() except RunIsComplete: pass for v in self.interesting_examples.values(): self.debug_data(v) self.debug( u"Run complete after %d examples (%d valid) and %d shrinks" % (self.call_count, self.valid_examples, self.shrinks) ) def _new_mutator(self): target_data = [None] def draw_new(data, n): return uniform(self.random, n) def draw_existing(data, n): return target_data[0].buffer[data.index : data.index + n] def draw_smaller(data, n): existing = target_data[0].buffer[data.index : data.index + n] r = uniform(self.random, n) if r <= existing: return r return _draw_predecessor(self.random, existing) def draw_larger(data, n): existing = target_data[0].buffer[data.index : data.index + n] r = uniform(self.random, n) if r >= existing: return r return _draw_successor(self.random, existing) def reuse_existing(data, n): choices = data.block_starts.get(n, []) if choices: i = self.random.choice(choices) assert i + n <= len(data.buffer) return hbytes(data.buffer[i : i + n]) else: result = uniform(self.random, n) assert isinstance(result, hbytes) return result def flip_bit(data, n): buf = bytearray(target_data[0].buffer[data.index : data.index + n]) i = self.random.randint(0, n 
- 1) k = self.random.randint(0, 7) buf[i] ^= 1 << k return hbytes(buf) def draw_zero(data, n): return hbytes(b"\0" * n) def draw_max(data, n): return hbytes([255]) * n def draw_constant(data, n): return hbytes([self.random.randint(0, 255)]) * n def redraw_last(data, n): u = target_data[0].blocks[-1].start if data.index + n <= u: return target_data[0].buffer[data.index : data.index + n] else: return uniform(self.random, n) options = [ draw_new, redraw_last, redraw_last, reuse_existing, reuse_existing, draw_existing, draw_smaller, draw_larger, flip_bit, draw_zero, draw_max, draw_zero, draw_max, draw_constant, ] bits = [self.random.choice(options) for _ in hrange(3)] prefix = [None] def mutate_from(origin): target_data[0] = origin prefix[0] = self.generate_novel_prefix() return draw_mutated def draw_mutated(data, n): if data.index + n > len(target_data[0].buffer): result = uniform(self.random, n) else: draw = self.random.choice(bits) result = draw(data, n) p = prefix[0] if data.index < len(p): start = p[data.index : data.index + n] result = start + result[len(start) :] assert len(result) == n return self.__zero_bound(data, result) return mutate_from def __zero_bound(self, data, result): """This tries to get the size of the generated data under control by replacing the result with zero if we are too deep or have already generated too much data. This causes us to enter "shrinking mode" there and thus reduce the size of the generated data. 
""" initial = len(result) if data.depth * 2 >= MAX_DEPTH or data.index >= self.cap: data.forced_indices.update(hrange(data.index, data.index + initial)) data.hit_zero_bound = True result = hbytes(initial) elif data.index + initial >= self.cap: data.hit_zero_bound = True n = self.cap - data.index data.forced_indices.update(hrange(self.cap, data.index + initial)) result = result[:n] + hbytes(initial - n) assert len(result) == initial return result @property def database(self): if self.database_key is None: return None return self.settings.database def has_existing_examples(self): return self.database is not None and Phase.reuse in self.settings.phases def reuse_existing_examples(self): """If appropriate (we have a database and have been told to use it), try to reload existing examples from the database. If there are a lot we don't try all of them. We always try the smallest example in the database (which is guaranteed to be the last failure) and the largest (which is usually the seed example which the last failure came from but we don't enforce that). We then take a random sampling of the remainder and try those. Any examples that are no longer interesting are cleared out. """ if self.has_existing_examples(): self.debug("Reusing examples from database") # We have to do some careful juggling here. We have two database # corpora: The primary and secondary. The primary corpus is a # small set of minimized examples each of which has at one point # demonstrated a distinct bug. We want to retry all of these. # We also have a secondary corpus of examples that have at some # point demonstrated interestingness (currently only ones that # were previously non-minimal examples of a bug, but this will # likely expand in future). These are a good source of potentially # interesting examples, but there are a lot of them, so we down # sample the secondary corpus to a more manageable size. 
corpus = sorted( self.settings.database.fetch(self.database_key), key=sort_key ) desired_size = max(2, ceil(0.1 * self.settings.max_examples)) for extra_key in [self.secondary_key, self.covering_key]: if len(corpus) < desired_size: extra_corpus = list(self.settings.database.fetch(extra_key)) shortfall = desired_size - len(corpus) if len(extra_corpus) <= shortfall: extra = extra_corpus else: extra = self.random.sample(extra_corpus, shortfall) extra.sort(key=sort_key) corpus.extend(extra) self.used_examples_from_database = len(corpus) > 0 for existing in corpus: last_data = ConjectureData.for_buffer( existing, observer=self.tree.new_observer() ) try: self.test_function(last_data) finally: if last_data.status != Status.INTERESTING: self.settings.database.delete(self.database_key, existing) self.settings.database.delete(self.secondary_key, existing) def exit_with(self, reason): self.exit_reason = reason raise RunIsComplete() def generate_new_examples(self): if Phase.generate not in self.settings.phases: return if self.interesting_examples: # The example database has failing examples from a previous run, # so we'd rather report that they're still failing ASAP than take # the time to look for additional failures. return zero_data = self.cached_test_function(hbytes(BUFFER_SIZE)) if zero_data.status > Status.OVERRUN: self.__data_cache.pin(zero_data.buffer) if zero_data.status == Status.OVERRUN or ( zero_data.status == Status.VALID and len(zero_data.buffer) * 2 > BUFFER_SIZE ): fail_health_check( self.settings, "The smallest natural example for your test is extremely " "large. This makes it difficult for Hypothesis to generate " "good examples, especially when trying to reduce failing ones " "at the end. Consider reducing the size of your data if it is " "of a fixed size. You could also fix this by improving how " "your data shrinks (see https://hypothesis.readthedocs.io/en/" "latest/data.html#shrinking for details), or by introducing " "default values inside your strategy. 
e.g. could you replace " "some arguments with their defaults by using " "one_of(none(), some_complex_strategy)?", HealthCheck.large_base_example, ) if zero_data is not Overrun: # If the language starts with writes of length >= cap then there is # only one string in it: Everything after cap is forced to be zero (or # to be whatever value is written there). That means that once we've # tried the zero value, there's nothing left for us to do, so we # exit early here. has_non_forced = False # It's impossible to fall out of this loop normally because if we # did then that would mean that all blocks are writes, so we would # already have triggered the exhaustedness check on the tree and # finished running. for b in zero_data.blocks: # pragma: no branch if b.start >= self.cap: break if not b.forced: has_non_forced = True break if not has_non_forced: self.exit_with(ExitReason.finished) self.health_check_state = HealthCheckState() def should_generate_more(): # If we haven't found a bug, keep looking. We check this before # doing anything else as it's by far the most common case. if not self.interesting_examples: return True # If we've found a bug and won't report more than one, stop looking. elif not self.settings.report_multiple_bugs: return False assert self.first_bug_found_at <= self.last_bug_found_at <= self.call_count # End the generation phase where we would have ended it if no bugs had # been found. This reproduces the exit logic in `self.test_function`, # but with the important distinction that this clause will move on to # the shrinking phase having found one or more bugs, while the other # will exit having found zero bugs. if ( self.valid_examples >= self.settings.max_examples or self.call_count >= max(self.settings.max_examples * 10, 1000) ): # pragma: no cover return False # Otherwise, keep searching for between ten and 'a heuristic' calls. 
# We cap 'calls after first bug' so errors are reported reasonably # soon even for tests that are allowed to run for a very long time, # or sooner if the latest half of our test effort has been fruitless. return self.call_count < MIN_TEST_CALLS or self.call_count < min( self.first_bug_found_at + 1000, self.last_bug_found_at * 2 ) count = 0 while should_generate_more() and ( count < 10 or self.health_check_state is not None # If we have not found a valid prefix yet, the target selector will # be empty and the mutation stage will fail with a very rare internal # error. We therefore continue this initial random generation step # until we have found at least one prefix to mutate. or len(self.target_selector) == 0 ): prefix = self.generate_novel_prefix() def draw_bytes(data, n): if data.index < len(prefix): result = prefix[data.index : data.index + n] # We always draw prefixes as a whole number of blocks assert len(result) == n else: result = uniform(self.random, n) return self.__zero_bound(data, result) last_data = self.new_conjecture_data(draw_bytes) self.test_function(last_data) last_data.freeze() count += 1 mutations = 0 mutator = self._new_mutator() zero_bound_queue = [] while should_generate_more(): if zero_bound_queue: # Whenever we generated an example and it hits a bound # which forces zero blocks into it, this creates a weird # distortion effect by making certain parts of the data # stream (especially ones to the right) much more likely # to be zero. We fix this by redistributing the generated # data by shuffling it randomly. This results in the # zero data being spread evenly throughout the buffer. # Hopefully the shrinking this causes will cause us to # naturally fail to hit the bound. # If it doesn't then we will queue the new version up again # (now with more zeros) and try again. 
overdrawn = zero_bound_queue.pop() buffer = bytearray(overdrawn.buffer) # These will have values written to them that are different # from what's in them anyway, so the value there doesn't # really "count" for distributional purposes, and if we # leave them in then they can cause the fraction of non # zero bytes to increase on redraw instead of decrease. for i in overdrawn.forced_indices: buffer[i] = 0 self.random.shuffle(buffer) buffer = hbytes(buffer) def draw_bytes(data, n): result = buffer[data.index : data.index + n] if len(result) < n: result += hbytes(n - len(result)) return self.__zero_bound(data, result) data = self.new_conjecture_data(draw_bytes=draw_bytes) self.test_function(data) data.freeze() else: origin = self.target_selector.select() mutations += 1 data = self.new_conjecture_data(draw_bytes=mutator(origin)) self.test_function(data) data.freeze() if data.status > origin.status: mutations = 0 elif data.status < origin.status or mutations >= 10: # Cap the variations of a single example and move on to # an entirely fresh start. Ten is an entirely arbitrary # constant, but it's been working well for years. mutations = 0 mutator = self._new_mutator() if getattr(data, "hit_zero_bound", False): zero_bound_queue.append(data) mutations += 1 def _run(self): self.reuse_existing_examples() self.generate_new_examples() self.shrink_interesting_examples() self.exit_with(ExitReason.finished) def new_conjecture_data(self, draw_bytes): return ConjectureData( draw_bytes=draw_bytes, max_length=BUFFER_SIZE, observer=self.tree.new_observer(), ) def new_conjecture_data_for_buffer(self, buffer): return ConjectureData.for_buffer(buffer, observer=self.tree.new_observer()) def shrink_interesting_examples(self): """If we've found interesting examples, try to replace each of them with a minimal interesting example with the same interesting_origin. We may find one or more examples with a new interesting_origin during the shrink process. If so we shrink these too. 
""" if Phase.shrink not in self.settings.phases or not self.interesting_examples: return for prev_data in sorted( self.interesting_examples.values(), key=lambda d: sort_key(d.buffer) ): assert prev_data.status == Status.INTERESTING data = self.new_conjecture_data_for_buffer(prev_data.buffer) self.test_function(data) if data.status != Status.INTERESTING: self.exit_with(ExitReason.flaky) self.clear_secondary_key() while len(self.shrunk_examples) < len(self.interesting_examples): target, example = min( [ (k, v) for k, v in self.interesting_examples.items() if k not in self.shrunk_examples ], key=lambda kv: (sort_key(kv[1].buffer), sort_key(repr(kv[0]))), ) self.debug("Shrinking %r" % (target,)) if not self.settings.report_multiple_bugs: # If multi-bug reporting is disabled, we shrink our currently-minimal # failure, allowing 'slips' to any bug with a smaller minimal example. self.shrink(example, lambda d: d.status == Status.INTERESTING) return def predicate(d): if d.status < Status.INTERESTING: return False return d.interesting_origin == target self.shrink(example, predicate) self.shrunk_examples.add(target) def clear_secondary_key(self): if self.has_existing_examples(): # If we have any smaller examples in the secondary corpus, now is # a good time to try them to see if they work as shrinks. They # probably won't, but it's worth a shot and gives us a good # opportunity to clear out the database. # It's not worth trying the primary corpus because we already # tried all of those in the initial phase. corpus = sorted( self.settings.database.fetch(self.secondary_key), key=sort_key ) for c in corpus: primary = {v.buffer for v in self.interesting_examples.values()} cap = max(map(sort_key, primary)) if sort_key(c) > cap: break else: self.cached_test_function(c) # We unconditionally remove c from the secondary key as it # is either now primary or worse than our primary example # of this reason for interestingness. 
self.settings.database.delete(self.secondary_key, c) def shrink(self, example, predicate): s = self.new_shrinker(example, predicate) s.shrink() return s.shrink_target def new_shrinker(self, example, predicate): return Shrinker(self, example, predicate) def cached_test_function(self, buffer): """Checks the tree to see if we've tested this buffer, and returns the previous result if we have. Otherwise we call through to ``test_function``, and return a fresh result. """ buffer = hbytes(buffer) def check_result(result): assert result is Overrun or ( isinstance(result, ConjectureResult) and result.status != Status.OVERRUN ) return result try: return check_result(self.__data_cache[buffer]) except KeyError: pass rewritten, status = self.tree.rewrite(buffer) try: result = check_result(self.__data_cache[rewritten]) except KeyError: pass else: assert result.status != Status.OVERRUN or result is Overrun self.__data_cache[buffer] = result return result # We didn't find a match in the tree, so we need to run the test # function normally. Note that test_function will automatically # add this to the tree so we don't need to update the cache. 
result = None if status != Status.OVERRUN: data = self.new_conjecture_data_for_buffer(buffer) self.test_function(data) result = check_result(data.as_result()) assert status is None or result.status == status status = result.status if status == Status.OVERRUN: result = Overrun assert result is not None self.__data_cache[buffer] = result return result def event_to_string(self, event): if isinstance(event, str): return event try: return self.events_to_strings[event] except KeyError: pass result = str(event) self.events_to_strings[event] = result return result def _draw_predecessor(rnd, xs): r = bytearray() any_strict = False for x in to_bytes_sequence(xs): if not any_strict: c = rnd.randint(0, x) if c < x: any_strict = True else: c = rnd.randint(0, 255) r.append(c) return hbytes(r) def _draw_successor(rnd, xs): r = bytearray() any_strict = False for x in to_bytes_sequence(xs): if not any_strict: c = rnd.randint(x, 255) if c > x: any_strict = True else: c = rnd.randint(0, 255) r.append(c) return hbytes(r) class TargetSelector(object): """Data structure for selecting targets to use for mutation. The main purpose of the TargetSelector is to maintain a pool of "reasonably useful" examples, while keeping the pool of bounded size. In particular it ensures: 1. We only retain examples of the best status we've seen so far (not counting INTERESTING, which is special). 2. We preferentially return examples we've never returned before when select() is called. 3. The number of retained examples is never more than self.pool_size, with past examples discarded automatically, preferring ones that we have already explored from. These invariants are fairly heavily prone to change - they're not especially well validated as being optimal, and are mostly just a decent compromise between diversity and keeping the pool size bounded. 
""" def __init__(self, random, pool_size=MUTATION_POOL_SIZE): self.random = random self.best_status = Status.OVERRUN self.pool_size = pool_size self.reset() def __len__(self): return len(self.fresh_examples) + len(self.used_examples) def reset(self): self.fresh_examples = [] self.used_examples = [] def add(self, data): if data.status == Status.INTERESTING: return if data.status < self.best_status: return if data.status > self.best_status: self.best_status = data.status self.reset() self.fresh_examples.append(data) if len(self) > self.pool_size: pop_random(self.random, self.used_examples or self.fresh_examples) assert self.pool_size == len(self) def select(self): if self.fresh_examples: result = pop_random(self.random, self.fresh_examples) self.used_examples.append(result) return result else: return self.random.choice(self.used_examples) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/floats.py000066400000000000000000000174151354103617500340610ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from array import array from hypothesis.internal.compat import hrange from hypothesis.internal.conjecture.utils import calc_label_from_name from hypothesis.internal.floats import float_to_int, int_to_float """ This module implements support for arbitrary floating point numbers in Conjecture. It doesn't make any attempt to get a good distribution, only to get a format that will shrink well. It works by defining an encoding of non-negative floating point numbers (including NaN values with a zero sign bit) that has good lexical shrinking properties. This encoding is a tagged union of two separate encodings for floating point numbers, with the tag being the first bit of 64 and the remaining 63-bits being the payload. If the tag bit is 0, the next 7 bits are ignored, and the remaining 7 bytes are interpreted as a 7 byte integer in big-endian order and then converted to a float (there is some redundancy here, as 7 * 8 = 56, which is larger than the largest integer that floating point numbers can represent exactly, so multiple encodings may map to the same float). If the tag bit is 1, we instead use somemthing that is closer to the normal representation of floats (and can represent every non-negative float exactly) but has a better ordering: 1. NaNs are ordered after everything else. 2. Infinity is ordered after every finite number. 3. The sign is ignored unless two floating point numbers are identical in absolute magnitude. In that case, the positive is ordered before the negative. 4. Positive floating point numbers are ordered first by int(x) where encoding(x) < encoding(y) if int(x) < int(y). 5. If int(x) == int(y) then x and y are sorted towards lower denominators of their fractional parts. The format of this encoding of floating point goes as follows: [exponent] [mantissa] Each of these is the same size their equivalent in IEEE floating point, but are in a different format. 
We translate exponents as follows: 1. The maximum exponent (2 ** 11 - 1) is left unchanged. 2. We reorder the remaining exponents so that all of the positive exponents are first, in increasing order, followed by all of the negative exponents in decreasing order (where positive/negative is done by the unbiased exponent e - 1023). We translate the mantissa as follows: 1. If the unbiased exponent is <= 0 we reverse it bitwise. 2. If the unbiased exponent is >= 52 we leave it alone. 3. If the unbiased exponent is in the range [1, 51] then we reverse the low k bits, where k is 52 - unbiased exponent. The low bits correspond to the fractional part of the floating point number. Reversing it bitwise means that we try to minimize the low bits, which kills off the higher powers of 2 in the fraction first. """ MAX_EXPONENT = 0x7FF SPECIAL_EXPONENTS = (0, MAX_EXPONENT) BIAS = 1023 MAX_POSITIVE_EXPONENT = MAX_EXPONENT - 1 - BIAS DRAW_FLOAT_LABEL = calc_label_from_name("drawing a float") def exponent_key(e): if e == MAX_EXPONENT: return float("inf") unbiased = e - BIAS if unbiased < 0: return 10000 - unbiased else: return unbiased ENCODING_TABLE = array("H", sorted(hrange(MAX_EXPONENT + 1), key=exponent_key)) DECODING_TABLE = array("H", [0]) * len(ENCODING_TABLE) for i, b in enumerate(ENCODING_TABLE): DECODING_TABLE[b] = i del i, b def decode_exponent(e): """Take draw_bits(11) and turn it into a suitable floating point exponent such that lexicographically simpler leads to simpler floats.""" assert 0 <= e <= MAX_EXPONENT return ENCODING_TABLE[e] def encode_exponent(e): """Take a floating point exponent and turn it back into the equivalent result from conjecture.""" assert 0 <= e <= MAX_EXPONENT return DECODING_TABLE[e] def reverse_byte(b): result = 0 for _ in range(8): result <<= 1 result |= b & 1 b >>= 1 return result # Table mapping individual bytes to the equivalent byte with the bits of the # byte reversed. e.g. 1=0b1 is mapped to 0xb10000000=0x80=128. 
We use this # precalculated table to simplify calculating the bitwise reversal of a longer # integer. REVERSE_BITS_TABLE = bytearray(map(reverse_byte, range(256))) def reverse64(v): """Reverse a 64-bit integer bitwise. We do this by breaking it up into 8 bytes. The 64-bit integer is then the concatenation of each of these bytes. We reverse it by reversing each byte on its own using the REVERSE_BITS_TABLE above, and then concatenating the reversed bytes. In this case concatenating consists of shifting them into the right position for the word and then oring the bits together. """ assert v.bit_length() <= 64 return ( (REVERSE_BITS_TABLE[(v >> 0) & 0xFF] << 56) | (REVERSE_BITS_TABLE[(v >> 8) & 0xFF] << 48) | (REVERSE_BITS_TABLE[(v >> 16) & 0xFF] << 40) | (REVERSE_BITS_TABLE[(v >> 24) & 0xFF] << 32) | (REVERSE_BITS_TABLE[(v >> 32) & 0xFF] << 24) | (REVERSE_BITS_TABLE[(v >> 40) & 0xFF] << 16) | (REVERSE_BITS_TABLE[(v >> 48) & 0xFF] << 8) | (REVERSE_BITS_TABLE[(v >> 56) & 0xFF] << 0) ) MANTISSA_MASK = (1 << 52) - 1 def reverse_bits(x, n): assert x.bit_length() <= n <= 64 x = reverse64(x) x >>= 64 - n return x def update_mantissa(unbiased_exponent, mantissa): if unbiased_exponent <= 0: mantissa = reverse_bits(mantissa, 52) elif unbiased_exponent <= 51: n_fractional_bits = 52 - unbiased_exponent fractional_part = mantissa & ((1 << n_fractional_bits) - 1) mantissa ^= fractional_part mantissa |= reverse_bits(fractional_part, n_fractional_bits) return mantissa def lex_to_float(i): assert i.bit_length() <= 64 has_fractional_part = i >> 63 if has_fractional_part: exponent = (i >> 52) & ((1 << 11) - 1) exponent = decode_exponent(exponent) mantissa = i & MANTISSA_MASK mantissa = update_mantissa(exponent - BIAS, mantissa) assert mantissa.bit_length() <= 52 return int_to_float((exponent << 52) | mantissa) else: integral_part = i & ((1 << 56) - 1) return float(integral_part) def float_to_lex(f): if is_simple(f): assert f >= 0 return int(f) return base_float_to_lex(f) def 
base_float_to_lex(f): i = float_to_int(f) i &= (1 << 63) - 1 exponent = i >> 52 mantissa = i & MANTISSA_MASK mantissa = update_mantissa(exponent - BIAS, mantissa) exponent = encode_exponent(exponent) assert mantissa.bit_length() <= 52 return (1 << 63) | (exponent << 52) | mantissa def is_simple(f): try: i = int(f) except (ValueError, OverflowError): return False if i != f: return False return i.bit_length() <= 56 def draw_float(data): try: data.start_example(DRAW_FLOAT_LABEL) f = lex_to_float(data.draw_bits(64)) if data.draw_bits(1): f = -f return f finally: data.stop_example() def write_float(data, f): data.draw_bits(64, forced=float_to_lex(abs(f))) sign = float_to_int(f) >> 63 data.draw_bits(1, forced=sign) junkdrawer.py000066400000000000000000000147071354103617500346670ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """A module for miscellaneous useful bits and bobs that don't obviously belong anywhere else. If you spot a better home for anything that lives here, please move it.""" from __future__ import absolute_import, division, print_function from hypothesis.internal.compat import ( array_or_list, hbytes, int_to_bytes, integer_types, ) def replace_all(buffer, replacements): """Substitute multiple replacement values into a buffer. 
Replacements is a list of (start, end, value) triples. """ result = bytearray() prev = 0 offset = 0 for u, v, r in replacements: result.extend(buffer[prev:u]) result.extend(r) prev = v offset += len(r) - (v - u) result.extend(buffer[prev:]) assert len(result) == len(buffer) + offset return hbytes(result) ARRAY_CODES = ["B", "H", "I", "L", "Q", "O"] NEXT_ARRAY_CODE = dict(zip(ARRAY_CODES, ARRAY_CODES[1:])) class IntList(object): """Class for storing a list of non-negative integers compactly. We store them as the smallest size integer array we can get away with. When we try to add an integer that is too large, we upgrade the array to the smallest word size needed to store the new value.""" __slots__ = ("__underlying",) def __init__(self, values=()): for code in ARRAY_CODES: try: self.__underlying = array_or_list(code, values) break except OverflowError: pass else: # pragma: no cover assert False, "Could not create storage for %r" % (values,) if isinstance(self.__underlying, list): for v in self.__underlying: if v < 0 or not isinstance(v, integer_types): raise ValueError("Could not create IntList for %r" % (values,)) @classmethod def of_length(self, n): return IntList(array_or_list("B", [0]) * n) def count(self, n): return self.__underlying.count(n) def __repr__(self): return "IntList(%r)" % (list(self),) def __len__(self): return len(self.__underlying) def __getitem__(self, i): if isinstance(i, slice): return IntList(self.__underlying[i]) return self.__underlying[i] def __delitem__(self, i): del self.__underlying[i] def __iter__(self): return iter(self.__underlying) def __eq__(self, other): if self is other: return True if not isinstance(other, IntList): return NotImplemented return self.__underlying == other.__underlying def __ne__(self, other): if self is other: return False if not isinstance(other, IntList): return NotImplemented return self.__underlying != other.__underlying def append(self, n): i = len(self) self.__underlying.append(0) self[i] = n def 
__setitem__(self, i, n): while True: try: self.__underlying[i] = n return except OverflowError: assert n > 0 self.__upgrade() def extend(self, ls): for n in ls: self.append(n) def __upgrade(self): code = NEXT_ARRAY_CODE[self.__underlying.typecode] self.__underlying = array_or_list(code, self.__underlying) def pop_random(random, values): """Remove a random element of values, possibly changing the ordering of its elements.""" # We pick the element at a random index. Rather than removing that element # from the list (which would be an O(n) operation), we swap it to the end # and return the last element of the list. This changes the order of # the elements, but as long as these elements are only accessed through # random sampling that doesn't matter. i = random.randrange(0, len(values)) values[i], values[-1] = values[-1], values[i] return values.pop() def binary_search(lo, hi, f): """Binary searches in [lo , hi) to find n such that f(n) == f(lo) but f(n + 1) != f(lo). It is implicitly assumed and will not be checked that f(hi) != f(lo). """ reference = f(lo) while lo + 1 < hi: mid = (lo + hi) // 2 if f(mid) == reference: lo = mid else: hi = mid return lo def uniform(random, n): """Returns an hbytes of length n, distributed uniformly at random.""" return int_to_bytes(random.getrandbits(n * 8), n) class LazySequenceCopy(object): """A "copy" of a sequence that works by inserting a mask in front of the underlying sequence, so that you can mutate it without changing the underlying sequence. Effectively behaves as if you could do list(x) in O(1) time. 
The full list API is not supported yet but there's no reason in principle it couldn't be.""" def __init__(self, values): self.__values = values self.__len = len(values) self.__mask = None def __len__(self): return self.__len def pop(self): if len(self) == 0: raise IndexError("Cannot pop from empty list") result = self[-1] self.__len -= 1 if self.__mask is not None: self.__mask.pop(self.__len, None) return result def __getitem__(self, i): i = self.__check_index(i) default = self.__values[i] if self.__mask is None: return default else: return self.__mask.get(i, default) def __setitem__(self, i, v): i = self.__check_index(i) if self.__mask is None: self.__mask = {} self.__mask[i] = v def __check_index(self, i): n = len(self) if i < -n or i >= n: raise IndexError("Index %d out of range [0, %d)" % (i, n)) if i < 0: i += n assert 0 <= i < n return i hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/shrinker.py000066400000000000000000001632761354103617500344250ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from collections import defaultdict import attr from hypothesis.internal.compat import hbytes, hrange, int_from_bytes, int_to_bytes from hypothesis.internal.conjecture.choicetree import ChoiceTree from hypothesis.internal.conjecture.data import ConjectureResult, Overrun, Status from hypothesis.internal.conjecture.floats import ( DRAW_FLOAT_LABEL, float_to_lex, lex_to_float, ) from hypothesis.internal.conjecture.junkdrawer import binary_search, replace_all from hypothesis.internal.conjecture.shrinking import Float, Integer, Lexical, Ordering from hypothesis.internal.conjecture.shrinking.common import find_integer if False: from typing import Dict # noqa def sort_key(buffer): """Returns a sort key such that "simpler" buffers are smaller than "more complicated" ones. We define sort_key so that x is simpler than y if x is shorter than y or if they have the same length and x < y lexicographically. This is called the shortlex order. The reason for using the shortlex order is: 1. If x is shorter than y then that means we had to make fewer decisions in constructing the test case when we ran x than we did when we ran y. 2. If x is the same length as y then replacing a byte with a lower byte corresponds to reducing the value of an integer we drew with draw_bits towards zero. 3. We want a total order, and given (2) the natural choices for things of the same size are either the lexicographic or colexicographic orders (the latter being the lexicographic order of the reverse of the string). Because values drawn early in generation potentially get used in more places they potentially have a more significant impact on the final result, so it makes sense to prioritise reducing earlier values over later ones. This makes the lexicographic order the more natural choice. 
""" return (len(buffer), buffer) SHRINK_PASS_DEFINITIONS = {} # type: Dict[str, ShrinkPassDefinition] @attr.s() class ShrinkPassDefinition(object): """A shrink pass bundles together a large number of local changes to the current shrink target. Each shrink pass is defined by some function and some arguments to that function. The ``generate_arguments`` function returns all arguments that might be useful to run on the current shrink target. The guarantee made by methods defined this way is that after they are called then *either* the shrink target has changed *or* each of ``fn(*args)`` has been called for every ``args`` in ``generate_arguments(self)``. No guarantee is made that all of these will be called if the shrink target changes. """ run_with_chooser = attr.ib() @property def name(self): return self.run_with_chooser.__name__ def __attrs_post_init__(self): assert self.name not in SHRINK_PASS_DEFINITIONS, self.name SHRINK_PASS_DEFINITIONS[self.name] = self def defines_shrink_pass(): """A convenient decorator for defining shrink passes.""" def accept(run_step): ShrinkPassDefinition(run_with_chooser=run_step) def run(self): # pragma: no cover assert False, "Shrink passes should not be run directly" run.__name__ = run_step.__name__ run.is_shrink_pass = True return run return accept class Shrinker(object): """A shrinker is a child object of a ConjectureRunner which is designed to manage the associated state of a particular shrink problem. That is, we have some initial ConjectureData object and some property of interest that it satisfies, and we want to find a ConjectureData object with a shortlex (see sort_key above) smaller buffer that exhibits the same property. Currently the only property of interest we use is that the status is INTERESTING and the interesting_origin takes on some fixed value, but we may potentially be interested in other use cases later. However we assume that data with a status < VALID never satisfies the predicate. 
The shrinker keeps track of a value shrink_target which represents the current best known ConjectureData object satisfying the predicate. It refines this value by repeatedly running *shrink passes*, which are methods that perform a series of transformations to the current shrink_target and evaluate the underlying test function to find new ConjectureData objects. If any of these satisfy the predicate, the shrink_target is updated automatically. Shrinking runs until no shrink pass can improve the shrink_target, at which point it stops. It may also be terminated if the underlying engine throws RunIsComplete, but that is handled by the calling code rather than the Shrinker. ======================= Designing Shrink Passes ======================= Generally a shrink pass is just any function that calls cached_test_function and/or incorporate_new_buffer a number of times, but there are a couple of useful things to bear in mind. A shrink pass *makes progress* if running it changes self.shrink_target (i.e. it tries a shortlex smaller ConjectureData object satisfying the predicate). The desired end state of shrinking is to find a value such that no shrink pass can make progress, i.e. that we are at a local minimum for each shrink pass. In aid of this goal, the main invariant that a shrink pass much satisfy is that whether it makes progress must be deterministic. It is fine (encouraged even) for the specific progress it makes to be non-deterministic, but if you run a shrink pass, it makes no progress, and then you immediately run it again, it should never succeed on the second time. This allows us to stop as soon as we have run each shrink pass and seen no progress on any of them. This means that e.g. it's fine to try each of N deletions or replacements in a random order, but it's not OK to try N random deletions (unless you have already shrunk at least once, though we don't currently take advantage of this loophole). 
Shrink passes need to be written so as to be robust against change in the underlying shrink target. It is generally safe to assume that the shrink target does not change prior to the point of first modification - e.g. if you change no bytes at index ``i``, all examples whose start is ``<= i`` still exist, as do all blocks, and the data object is still of length ``>= i + 1``. This can only be violated by bad user code which relies on an external source of non-determinism. When the underlying shrink_target changes, shrink passes should not run substantially more test_function calls on success than they do on failure. Say, no more than a constant factor more. In particular shrink passes should not iterate to a fixed point. This means that shrink passes are often written with loops that are carefully designed to do the right thing in the case that no shrinks occurred and try to adapt to any changes to do a reasonable job. e.g. say we wanted to write a shrink pass that tried deleting each individual byte (this isn't an especially good choice, but it leads to a simple illustrative example), we might do it by iterating over the buffer like so: .. code-block:: python i = 0 while i < len(self.shrink_target.buffer): if not self.incorporate_new_buffer( self.shrink_target.buffer[: i] + self.shrink_target.buffer[i + 1 :] ): i += 1 The reason for writing the loop this way is that i is always a valid index into the current buffer, even if the current buffer changes as a result of our actions. When the buffer changes, we leave the index where it is rather than restarting from the beginning, and carry on. This means that the number of steps we run in this case is always bounded above by the number of steps we would run if nothing works. Another thing to bear in mind about shrink pass design is that they should prioritise *progress*. 
If you have N operations that you need to run, you should try to order them in such a way as to avoid stalling, where you have long periods of test function invocations where no shrinks happen. This is bad because whenever we shrink we reduce the amount of work the shrinker has to do in future, and often speed up the test function, so we ideally wanted those shrinks to happen much earlier in the process. Sometimes stalls are inevitable of course - e.g. if the pass makes no progress, then the entire thing is just one long stall, but it's helpful to design it so that stalls are less likely in typical behaviour. The two easiest ways to do this are: * Just run the N steps in random order. As long as a reasonably large proportion of the operations suceed, this guarantees the expected stall length is quite short. The book keeping for making sure this does the right thing when it succeeds can be quite annoying. * When you have any sort of nested loop, loop in such a way that both loop variables change each time. This prevents stalls which occur when one particular value for the outer loop is impossible to make progress on, rendering the entire inner loop into a stall. However, although progress is good, too much progress can be a bad sign! If you're *only* seeing successful reductions, that's probably a sign that you are making changes that are too timid. Two useful things to offset this: * It's worth writing shrink passes which are *adaptive*, in the sense that when operations seem to be working really well we try to bundle multiple of them together. This can often be used to turn what would be O(m) successful calls into O(log(m)). * It's often worth trying one or two special minimal values before trying anything more fine grained (e.g. replacing the whole thing with zero). """ def derived_value(fn): """It's useful during shrinking to have access to derived values of the current shrink target. This decorator allows you to define these as cached properties. 
They are calculated once, then cached until the shrink target changes, then recalculated the next time they are used.""" def accept(self): try: return self.__derived_values[fn.__name__] except KeyError: return self.__derived_values.setdefault(fn.__name__, fn(self)) accept.__name__ = fn.__name__ return property(accept) def __init__(self, engine, initial, predicate): """Create a shrinker for a particular engine, with a given starting point and predicate. When shrink() is called it will attempt to find an example for which predicate is True and which is strictly smaller than initial. Note that initial is a ConjectureData object, and predicate takes ConjectureData objects. """ self.__engine = engine self.__predicate = predicate self.__derived_values = {} self.__pending_shrink_explanation = None self.initial_size = len(initial.buffer) # We keep track of the current best example on the shrink_target # attribute. self.shrink_target = None self.update_shrink_target(initial) self.shrinks = 0 self.initial_calls = self.__engine.call_count self.passes_by_name = {} self.passes = [] @derived_value def cached_calculations(self): return {} def cached(self, *keys): def accept(f): cache_key = (f.__name__,) + keys try: return self.cached_calculations[cache_key] except KeyError: return self.cached_calculations.setdefault(cache_key, f()) return accept def explain_next_call_as(self, explanation): self.__pending_shrink_explanation = explanation def clear_call_explanation(self): self.__pending_shrink_explanation = None def add_new_pass(self, run): """Creates a shrink pass corresponding to calling ``run(self)``""" definition = SHRINK_PASS_DEFINITIONS[run] p = ShrinkPass( run_with_chooser=definition.run_with_chooser, shrinker=self, index=len(self.passes), ) self.passes.append(p) self.passes_by_name[p.name] = p return p def shrink_pass(self, name): """Return the ShrinkPass object for the pass with the given name.""" if isinstance(name, ShrinkPass): return name if name not in 
self.passes_by_name: self.add_new_pass(name) return self.passes_by_name[name] @property def calls(self): """Return the number of calls that have been made to the underlying test function.""" return self.__engine.call_count def consider_new_buffer(self, buffer): """Returns True if after running this buffer the result would be the current shrink_target.""" buffer = hbytes(buffer) return buffer.startswith(self.buffer) or self.incorporate_new_buffer(buffer) def incorporate_new_buffer(self, buffer): """Either runs the test function on this buffer and returns True if that changed the shrink_target, or determines that doing so would be useless and returns False without running it.""" buffer = hbytes(buffer[: self.shrink_target.index]) # Sometimes an attempt at lexicographic minimization will do the wrong # thing because the buffer has changed under it (e.g. something has # turned into a write, the bit size has changed). The result would be # an invalid string, but it's better for us to just ignore it here as # it turns out to involve quite a lot of tricky book-keeping to get # this right and it's better to just handle it in one place. 
if sort_key(buffer) >= sort_key(self.shrink_target.buffer): return False if self.shrink_target.buffer.startswith(buffer): return False previous = self.shrink_target self.cached_test_function(buffer) return previous is not self.shrink_target def incorporate_test_data(self, data): """Takes a ConjectureData or Overrun object updates the current shrink_target if this data represents an improvement over it, returning True if it is.""" if data is Overrun or data is self.shrink_target: return if self.__predicate(data) and sort_key(data.buffer) < sort_key( self.shrink_target.buffer ): self.update_shrink_target(data) return True return False def cached_test_function(self, buffer): """Returns a cached version of the underlying test function, so that the result is either an Overrun object (if the buffer is too short to be a valid test case) or a ConjectureData object with status >= INVALID that would result from running this buffer.""" if self.__pending_shrink_explanation is not None: self.debug(self.__pending_shrink_explanation) self.__pending_shrink_explanation = None buffer = hbytes(buffer) result = self.__engine.cached_test_function(buffer) self.incorporate_test_data(result) return result def debug(self, msg): self.__engine.debug(msg) @property def random(self): return self.__engine.random def shrink(self): """Run the full set of shrinks and update shrink_target. This method is "mostly idempotent" - calling it twice is unlikely to have any effect, though it has a non-zero probability of doing so. """ # We assume that if an all-zero block of bytes is an interesting # example then we're not going to do better than that. # This might not technically be true: e.g. for integers() | booleans() # the simplest example is actually [1, 0]. 
Missing this case is fairly # harmless and this allows us to make various simplifying assumptions # about the structure of the data (principally that we're never # operating on a block of all zero bytes so can use non-zeroness as a # signpost of complexity). if not any(self.shrink_target.buffer) or self.incorporate_new_buffer( hbytes(len(self.shrink_target.buffer)) ): return try: self.greedy_shrink() finally: if self.__engine.report_debug_info: def s(n): return "s" if n != 1 else "" total_deleted = self.initial_size - len(self.shrink_target.buffer) self.debug("---------------------") self.debug("Shrink pass profiling") self.debug("---------------------") self.debug("") calls = self.__engine.call_count - self.initial_calls self.debug( ( "Shrinking made a total of %d call%s " "of which %d shrank. This deleted %d byte%s out of %d." ) % ( calls, s(calls), self.shrinks, total_deleted, s(total_deleted), self.initial_size, ) ) for useful in [True, False]: self.debug("") if useful: self.debug("Useful passes:") else: self.debug("Useless passes:") self.debug("") for p in sorted( self.passes, key=lambda t: (-t.calls, t.deletions, t.shrinks) ): if p.calls == 0: continue if (p.shrinks != 0) != useful: continue self.debug( ( " * %s made %d call%s of which " "%d shrank, deleting %d byte%s." ) % ( p.name, p.calls, s(p.calls), p.shrinks, p.deletions, s(p.deletions), ) ) self.debug("") def greedy_shrink(self): """Run a full set of greedy shrinks (that is, ones that will only ever move to a better target) and update shrink_target appropriately. This method iterates to a fixed point and so is idempontent - calling it twice will have exactly the same effect as calling it once. 
""" self.fixate_shrink_passes( [ block_program("X" * 5), block_program("X" * 4), block_program("X" * 3), block_program("X" * 2), block_program("X" * 1), "pass_to_descendant", "adaptive_example_deletion", "alphabet_minimize", "zero_examples", "reorder_examples", "minimize_floats", "minimize_duplicated_blocks", block_program("-XX"), "minimize_individual_blocks", ] ) @derived_value def shrink_pass_choice_trees(self): return defaultdict(ChoiceTree) def fixate_shrink_passes(self, passes): """Run steps from each pass in ``passes`` until the current shrink target is a fixed point of all of them.""" passes = list(map(self.shrink_pass, passes)) any_ran = True while any_ran: any_ran = False # We run remove_discarded after every step to do cleanup # keeping track of whether that actually works. Either there is # no discarded data and it is basically free, or it reliably works # and deletes data, or it doesn't work. In that latter case we turn # it off for the rest of this loop through the passes, but will # try again once all of the passes have been run. can_discard = self.remove_discarded() successful_passes = set() for sp in passes: # We run each pass until it has failed a certain number # of times, where a "failure" is any step where it made # at least one call and did not result in a shrink. # This gives passes which work reasonably often more of # chance to run. failures = 0 successes = 0 # The choice of 3 is fairly arbitrary and was hand tuned # to some particular examples. It is very unlikely that # is the best choice in general, but it's not an # unreasonable choice: Making it smaller than this would # give too high a chance of an otherwise very worthwhile # pass getting screened out too early if it got unlucky, # and making it much larger than this would result in us # spending too much time on bad passes. 
max_failures = 3 while failures < max_failures: prev_calls = self.calls prev = self.shrink_target if sp.step(): any_ran = True else: break if prev_calls != self.calls: if can_discard: can_discard = self.remove_discarded() if prev is self.shrink_target: failures += 1 else: successes += 1 if successes > 0: successful_passes.add(sp) # If only some of our shrink passes are doing anything useful # then run all of those to a fixed point before running the # full set. This is particularly important when an emergency # shrink pass unlocks some non-emergency ones and it suddenly # becomes very expensive to find a bunch of small changes. if 0 < len(successful_passes) < len(passes): self.fixate_shrink_passes(successful_passes) for sp in passes: sp.fixed_point_at = self.shrink_target @property def buffer(self): return self.shrink_target.buffer @property def blocks(self): return self.shrink_target.blocks @property def examples(self): return self.shrink_target.examples def all_block_bounds(self): return self.shrink_target.blocks.all_bounds() @derived_value def examples_by_label(self): """An index of all examples grouped by their label, with the examples stored in their normal index order.""" examples_by_label = defaultdict(list) for ex in self.examples: examples_by_label[ex.label].append(ex) return dict(examples_by_label) @derived_value def distinct_labels(self): return sorted(self.examples_by_label, key=str) @defines_shrink_pass() def pass_to_descendant(self, chooser): """Attempt to replace each example with a descendant example. This is designed to deal with strategies that call themselves recursively. For example, suppose we had: binary_tree = st.deferred( lambda: st.one_of( st.integers(), st.tuples(binary_tree, binary_tree))) This pass guarantees that we can replace any binary tree with one of its subtrees - each of those will create an interval that the parent could validly be replaced with, and this pass will try doing that. 
This is pretty expensive - it takes O(len(intervals)^2) - so we run it late in the process when we've got the number of intervals as far down as possible. """ label = chooser.choose( self.distinct_labels, lambda l: len(self.examples_by_label[l]) >= 2 ) ls = self.examples_by_label[label] i = chooser.choose(hrange(len(ls) - 1)) ancestor = ls[i] @self.cached(label, i) def descendants(): lo = i + 1 hi = len(ls) while lo + 1 < hi: mid = (lo + hi) // 2 if ls[mid].start >= ancestor.end: hi = mid else: lo = mid return ls[i + 1 : hi] descendant = chooser.choose(descendants, lambda ex: ex.length > 0) self.incorporate_new_buffer( self.buffer[: ancestor.start] + self.buffer[descendant.start : descendant.end] + self.buffer[ancestor.end :] ) def lower_common_block_offset(self): """Sometimes we find ourselves in a situation where changes to one part of the byte stream unlock changes to other parts. Sometimes this is good, but sometimes this can cause us to exhibit exponential slow downs! e.g. suppose we had the following: m = draw(integers(min_value=0)) n = draw(integers(min_value=0)) assert abs(m - n) > 1 If this fails then we'll end up with a loop where on each iteration we reduce each of m and n by 2 - m can't go lower because of n, then n can't go lower because of m. This will take us O(m) iterations to complete, which is exponential in the data size, as we gradually zig zag our way towards zero. This can only happen if we're failing to reduce the size of the byte stream: The number of iterations that reduce the length of the byte stream is bounded by that length. So what we do is this: We keep track of which blocks are changing, and then if there's some non-zero common offset to them we try and minimize them all at once by lowering that offset. 
This may not work, and it definitely won't get us out of all possible exponential slow downs (an example of where it doesn't is where the shape of the blocks changes as a result of this bouncing behaviour), but it fails fast when it doesn't work and gets us out of a really nastily slow case when it does. """ if len(self.__changed_blocks) <= 1: return current = self.shrink_target blocked = [current.buffer[u:v] for u, v in self.all_block_bounds()] changed = [ i for i in sorted(self.__changed_blocks) if not self.shrink_target.blocks[i].trivial ] if not changed: return ints = [int_from_bytes(blocked[i]) for i in changed] offset = min(ints) assert offset > 0 for i in hrange(len(ints)): ints[i] -= offset def reoffset(o): new_blocks = list(blocked) for i, v in zip(changed, ints): new_blocks[i] = int_to_bytes(v + o, len(blocked[i])) return self.incorporate_new_buffer(hbytes().join(new_blocks)) Integer.shrink(offset, reoffset, random=self.random) self.clear_change_tracking() def clear_change_tracking(self): self.__last_checked_changed_at = self.shrink_target self.__all_changed_blocks = set() def mark_changed(self, i): self.__changed_blocks.add(i) @property def __changed_blocks(self): if self.__last_checked_changed_at is not self.shrink_target: prev_target = self.__last_checked_changed_at new_target = self.shrink_target assert prev_target is not new_target prev = prev_target.buffer new = new_target.buffer assert sort_key(new) < sort_key(prev) if ( len(new_target.blocks) != len(prev_target.blocks) or new_target.blocks.endpoints != prev_target.blocks.endpoints ): self.__all_changed_blocks = set() else: blocks = new_target.blocks # Index of last block whose contents have been modified, found # by checking if the tail past this point has been modified. 
last_changed = binary_search( 0, len(blocks), lambda i: prev[blocks.start(i) :] != new[blocks.start(i) :], ) # Index of the first block whose contents have been changed, # because we know that this predicate is true for zero (because # the prefix from the start is empty), so the result must be True # for the bytes from the start of this block and False for the # bytes from the end, hence the change is in this block. first_changed = binary_search( 0, len(blocks), lambda i: prev[: blocks.start(i)] == new[: blocks.start(i)], ) # Between these two changed regions we now do a linear scan to # check if any specific block values have changed. for i in hrange(first_changed, last_changed + 1): u, v = blocks.bounds(i) if i not in self.__all_changed_blocks and prev[u:v] != new[u:v]: self.__all_changed_blocks.add(i) self.__last_checked_changed_at = new_target assert self.__last_checked_changed_at is self.shrink_target return self.__all_changed_blocks def update_shrink_target(self, new_target): assert isinstance(new_target, ConjectureResult) if self.shrink_target is not None: self.shrinks += 1 else: self.__all_changed_blocks = set() self.__last_checked_changed_at = new_target self.shrink_target = new_target self.__derived_values = {} def try_shrinking_blocks(self, blocks, b): """Attempts to replace each block in the blocks list with b. Returns True if it succeeded (which may include some additional modifications to shrink_target). In current usage it is expected that each of the blocks currently have the same value, although this is not essential. Note that b must be < the block at min(blocks) or this is not a valid shrink. This method will attempt to do some small amount of work to delete data that occurs after the end of the blocks. This is useful for cases where there is some size dependency on the value of a block. 
""" initial_attempt = bytearray(self.shrink_target.buffer) for i, block in enumerate(blocks): if block >= len(self.blocks): blocks = blocks[:i] break u, v = self.blocks[block].bounds n = min(self.blocks[block].length, len(b)) initial_attempt[v - n : v] = b[-n:] if not blocks: return False start = self.shrink_target.blocks[blocks[0]].start end = self.shrink_target.blocks[blocks[-1]].end initial_data = self.cached_test_function(initial_attempt) if initial_data.status == Status.INTERESTING: self.lower_common_block_offset() return initial_data is self.shrink_target # If this produced something completely invalid we ditch it # here rather than trying to persevere. if initial_data.status < Status.VALID: return False # We've shrunk inside our group of blocks, so we have no way to # continue. (This only happens when shrinking more than one block at # a time). if len(initial_data.buffer) < v: return False lost_data = len(self.shrink_target.buffer) - len(initial_data.buffer) # If this did not in fact cause the data size to shrink we # bail here because it's not worth trying to delete stuff from # the remainder. if lost_data <= 0: return False # We now look for contiguous regions to delete that might help fix up # this failed shrink. We only look for contiguous regions of the right # lengths because doing anything more than that starts to get very # expensive. See minimize_individual_blocks for where we # try to be more aggressive. regions_to_delete = {(end, end + lost_data)} for j in (blocks[-1] + 1, blocks[-1] + 2): if j >= min(len(initial_data.blocks), len(self.blocks)): continue # We look for a block very shortly after the last one that has # lost some of its size, and try to delete from the beginning so # that it retains the same integer value. This is a bit of a hyper # specific trick designed to make our integers() strategy shrink # well. 
r1, s1 = self.shrink_target.blocks[j].bounds r2, s2 = initial_data.blocks[j].bounds lost = (s1 - r1) - (s2 - r2) # Apparently a coverage bug? An assert False in the body of this # will reliably fail, but it shows up as uncovered. if lost <= 0 or r1 != r2: # pragma: no cover continue regions_to_delete.add((r1, r1 + lost)) for ex in self.shrink_target.examples: if ex.start > start: continue if ex.end <= end: continue replacement = initial_data.examples[ex.index] in_original = [c for c in ex.children if c.start >= end] in_replaced = [c for c in replacement.children if c.start >= end] if len(in_replaced) >= len(in_original) or not in_replaced: continue # We've found an example where some of the children went missing # as a result of this change, and just replacing it with the data # it would have had and removing the spillover didn't work. This # means that some of its children towards the right must be # important, so we try to arrange it so that it retains its # rightmost children instead of its leftmost. regions_to_delete.add( (in_original[0].start, in_original[-len(in_replaced)].start) ) for u, v in sorted(regions_to_delete, key=lambda x: x[1] - x[0], reverse=True): try_with_deleted = bytearray(initial_attempt) del try_with_deleted[u:v] if self.incorporate_new_buffer(try_with_deleted): return True return False def remove_discarded(self): """Try removing all bytes marked as discarded. This is primarily to deal with data that has been ignored while doing rejection sampling - e.g. as a result of an integer range, or a filtered strategy. Such data will also be handled by the adaptive_example_deletion pass, but that pass is necessarily more conservative and will try deleting each interval individually. The common case is that all data drawn and rejected can just be thrown away immediately in one block, so this pass will be much faster than trying each one individually when it works. 
returns False if there is discarded data and removing it does not work, otherwise returns True. """ while self.shrink_target.has_discards: discarded = [] for ex in self.shrink_target.examples: if ( ex.length > 0 and ex.discarded and (not discarded or ex.start >= discarded[-1][-1]) ): discarded.append((ex.start, ex.end)) # This can happen if we have discards but they are all of # zero length. This shouldn't happen very often so it's # faster to check for it here than at the point of example # generation. if not discarded: break attempt = bytearray(self.shrink_target.buffer) for u, v in reversed(discarded): del attempt[u:v] if not self.incorporate_new_buffer(attempt): return False return True @defines_shrink_pass() def adaptive_example_deletion(self, chooser): """Attempts to delete every example from the test case. That is, it is logically equivalent to trying ``self.buffer[:ex.start] + self.buffer[ex.end:]`` for every example ``ex``. The order in which examples are tried is randomized, and when deletion is successful it will attempt to adapt to delete more than one example at a time. """ example = chooser.choose(self.examples) if not self.incorporate_new_buffer( self.buffer[: example.start] + self.buffer[example.end :] ): return # If we successfully deleted one example there may be a useful # deletable region around here. 
original = self.shrink_target endpoints = set() for ex in original.examples: if ex.depth <= example.depth: endpoints.add(ex.start) endpoints.add(ex.end) partition = sorted(endpoints) j = partition.index(example.start) def delete_region(a, b): assert a <= j <= b if a < 0 or b >= len(partition) - 1: return False return self.consider_new_buffer( original.buffer[: partition[a]] + original.buffer[partition[b] :] ) to_right = find_integer(lambda n: delete_region(j, j + n)) if to_right > 0: find_integer(lambda n: delete_region(j - n, j + to_right)) def try_zero_example(self, ex): u = ex.start v = ex.end attempt = self.cached_test_function( self.buffer[:u] + hbytes(v - u) + self.buffer[v:] ) if attempt is Overrun: return False in_replacement = attempt.examples[ex.index] used = in_replacement.length if attempt is not self.shrink_target: if in_replacement.end < len(attempt.buffer) and used < ex.length: self.incorporate_new_buffer( self.buffer[:u] + hbytes(used) + self.buffer[v:] ) return self.examples[ex.index].trivial @defines_shrink_pass() def zero_examples(self, chooser): """Attempt to replace each example with a minimal version of itself.""" ex = chooser.choose(self.examples, lambda ex: not ex.trivial) # If the example is already trivial, assume there's nothing to do here. # We could attempt to use it as an adaptive replacement for other # similar examples, but that seems to be ineffective, resulting mostly # in redundant work rather than helping. if not self.try_zero_example(ex): return # If we zeroed the example we need to get the new one that replaced it. ex = self.examples[ex.index] original = self.shrink_target group = self.examples_by_label[ex.label] i = group.index(ex) replacement = self.buffer[ex.start : ex.end] # We first expand to cover the trivial region surrounding this group. # This avoids a situation where the adaptive phase "succeeds" a lot by # virtue of not doing anything and then goes into a galloping phase # where it does a bunch of useless work. 
def all_trivial(a, b): if a < 0 or b > len(group): return False return all(e.trivial for e in group[a:b]) start, end = expand_region(all_trivial, i, i + 1) # If we've got multiple trivial examples of different lengths then # this isn't going to work as a replacement for all of them and so we # skip out early. if any(e.length != len(replacement) for e in group[start:end]): return def can_zero(a, b): if a < 0 or b > len(group): return False regions = [] for e in group[a:b]: t = (e.start, e.end, replacement) if not regions or t[0] >= regions[-1][1]: regions.append(t) return self.consider_new_buffer(replace_all(original.buffer, regions)) expand_region(can_zero, start, end) @derived_value def blocks_by_non_zero_suffix(self): """Returns a list of blocks grouped by their non-zero suffix, as a list of (suffix, indices) pairs, skipping all groupings where there is only one index. This is only used for the arguments of minimize_duplicated_blocks. """ duplicates = defaultdict(list) for block in self.blocks: duplicates[non_zero_suffix(self.buffer[block.start : block.end])].append( block.index ) return duplicates @derived_value def duplicated_block_suffixes(self): return sorted(self.blocks_by_non_zero_suffix) @defines_shrink_pass() def minimize_duplicated_blocks(self, chooser): """Find blocks that have been duplicated in multiple places and attempt to minimize all of the duplicates simultaneously. This lets us handle cases where two values can't be shrunk independently of each other but can easily be shrunk together. For example if we had something like: ls = data.draw(lists(integers())) y = data.draw(integers()) assert y not in ls Suppose we drew y = 3 and after shrinking we have ls = [3]. If we were to replace both 3s with 0, this would be a valid shrink, but if we were to replace either 3 with 0 on its own the test would start passing. 
It is also useful for when that duplication is accidental and the value of the blocks doesn't matter very much because it allows us to replace more values at once. """ block = chooser.choose(self.duplicated_block_suffixes) targets = self.blocks_by_non_zero_suffix[block] if len(targets) <= 1: return Lexical.shrink( block, lambda b: self.try_shrinking_blocks(targets, b), random=self.random, full=False, ) @defines_shrink_pass() def minimize_floats(self, chooser): """Some shrinks that we employ that only really make sense for our specific floating point encoding that are hard to discover from any sort of reasonable general principle. This allows us to make transformations like replacing a NaN with an Infinity or replacing a float with its nearest integers that we would otherwise not be able to due to them requiring very specific transformations of the bit sequence. We only apply these transformations to blocks that "look like" our standard float encodings because they are only really meaningful there. The logic for detecting this is reasonably precise, but it doesn't matter if it's wrong. These are always valid transformations to make, they just don't necessarily correspond to anything particularly meaningful for non-float values. """ ex = chooser.choose( self.examples, lambda ex: ( ex.label == DRAW_FLOAT_LABEL and len(ex.children) == 2 and ex.children[0].length == 8 ), ) u = ex.children[0].start v = ex.children[0].end buf = self.shrink_target.buffer b = buf[u:v] f = lex_to_float(int_from_bytes(b)) b2 = int_to_bytes(float_to_lex(f), 8) if b == b2 or self.consider_new_buffer(buf[:u] + b2 + buf[v:]): Float.shrink( f, lambda x: self.consider_new_buffer( self.shrink_target.buffer[:u] + int_to_bytes(float_to_lex(x), 8) + self.shrink_target.buffer[v:] ), random=self.random, ) @defines_shrink_pass() def minimize_individual_blocks(self, chooser): """Attempt to minimize each block in sequence. This is the pass that ensures that e.g. each integer we draw is a minimum value. 
So it's the part that guarantees that if we e.g. do x = data.draw(integers()) assert x < 10 then in our shrunk example, x = 10 rather than say 97. If we are unsuccessful at minimizing a block of interest we then check if that's because it's changing the size of the test case and, if so, we also make an attempt to delete parts of the test case to see if that fixes it. We handle most of the common cases in try_shrinking_blocks which is pretty good at clearing out large contiguous blocks of dead space, but it fails when there is data that has to stay in particular places in the list. """ block = chooser.choose(self.blocks, lambda b: not b.trivial) initial = self.shrink_target u, v = block.bounds i = block.index Lexical.shrink( self.shrink_target.buffer[u:v], lambda b: self.try_shrinking_blocks((i,), b), random=self.random, full=False, ) if self.shrink_target is not initial: return lowered = ( self.buffer[: block.start] + int_to_bytes( int_from_bytes(self.buffer[block.start : block.end]) - 1, block.length ) + self.buffer[block.end :] ) attempt = self.cached_test_function(lowered) if ( attempt.status < Status.VALID or len(attempt.buffer) == len(self.buffer) or len(attempt.buffer) == block.end ): return # If it were then the lexical shrink should have worked and we could # never have got here. assert attempt is not self.shrink_target @self.cached(block.index) def first_example_after_block(): lo = 0 hi = len(self.examples) while lo + 1 < hi: mid = (lo + hi) // 2 ex = self.examples[mid] if ex.start >= block.end: hi = mid else: lo = mid return hi ex = self.examples[ chooser.choose( hrange(first_example_after_block, len(self.examples)), lambda i: self.examples[i].length > 0, ) ] u, v = block.bounds buf = bytearray(lowered) del buf[ex.start : ex.end] self.incorporate_new_buffer(buf) @defines_shrink_pass() def reorder_examples(self, chooser): """This pass allows us to reorder the children of each example. For example, consider the following: .. 
code-block:: python import hypothesis.strategies as st from hypothesis import given @given(st.text(), st.text()) def test_not_equal(x, y): assert x != y Without the ability to reorder x and y this could fail either with ``x=""``, ``y="0"``, or the other way around. With reordering it will reliably fail with ``x=""``, ``y="0"``. """ ex = chooser.choose(self.examples) label = chooser.choose(ex.children).label group = [c for c in ex.children if c.label == label] if len(group) <= 1: return st = self.shrink_target pieces = [st.buffer[ex.start : ex.end] for ex in group] endpoints = [(ex.start, ex.end) for ex in group] Ordering.shrink( pieces, lambda ls: self.consider_new_buffer( replace_all(st.buffer, [(u, v, r) for (u, v), r in zip(endpoints, ls)]) ), random=self.random, ) @derived_value def alphabet(self): return sorted(set(self.buffer)) @defines_shrink_pass() def alphabet_minimize(self, chooser): """Attempts to minimize the "alphabet" - the set of bytes that are used in the representation of the current buffer. The main benefit of this is that it significantly increases our cache hit rate by making things that are equivalent more likely to have the same representation, but it's also generally a rather effective "fuzzing" step that gives us a lot of good opportunities to slip to a smaller representation of the same bug. """ c = chooser.choose(self.alphabet) buf = self.buffer def can_replace_with(d): if d < 0: return False if self.consider_new_buffer(hbytes([d if b == c else b for b in buf])): if d <= 1: # For small values of d if this succeeds we take this # as evidence that it is worth doing a a bulk replacement # where we replace all values which are close # to c but smaller with d as well. This helps us substantially # in cases where we have a lot of "dead" bytes that don't really do # much, as it allows us to replace many of them in one go rather # than one at a time. 
An example of where this matters is # test_minimize_multiple_elements_in_silly_large_int_range_min_is_not_dupe # in test_shrink_quality.py def replace_range(k): if k > c: return False def should_replace_byte(b): return c - k <= b <= c and d < b return self.consider_new_buffer( hbytes([d if should_replace_byte(b) else b for b in buf]) ) find_integer(replace_range) return True if ( # If we cannot replace the current byte with its predecessor, # assume it is already minimal and continue on. This ensures # we make no more than one call per distinct byte value in the # event that no shrinks are possible here. not can_replace_with(c - 1) # We next try replacing with 0 or 1. If this works then # there is nothing else to do here. or can_replace_with(0) or can_replace_with(1) # Finally we try to replace with c - 2 before going on to the # binary search so that in cases which were already nearly # minimal we don't do log(n) extra work. or not can_replace_with(c - 2) ): return # Now binary search to find a small replacement. # Invariant: We cannot replace with lo, we can replace with hi. lo = 1 hi = c - 2 while lo + 1 < hi: mid = (lo + hi) // 2 if can_replace_with(mid): hi = mid else: lo = mid def run_block_program(self, i, description, original, repeats=1): """Block programs are a mini-DSL for block rewriting, defined as a sequence of commands that can be run at some index into the blocks Commands are: * "-", subtract one from this block. * "X", delete this block If a command does not apply (currently only because it's - on a zero block) the block will be silently skipped over. This method runs the block program in ``description`` at block index ``i`` on the ConjectureData ``original``. If ``repeats > 1`` then it will attempt to approximate the results of running it that many times. Returns True if this successfully changes the underlying shrink target, else False. 
""" if i + len(description) > len(original.blocks) or i < 0: return False attempt = bytearray(original.buffer) for _ in hrange(repeats): for k, d in reversed(list(enumerate(description))): j = i + k u, v = original.blocks[j].bounds if v > len(attempt): return False if d == "-": value = int_from_bytes(attempt[u:v]) if value == 0: return False else: attempt[u:v] = int_to_bytes(value - 1, v - u) elif d == "X": del attempt[u:v] else: # pragma: no cover assert False, "Unrecognised command %r" % (d,) return self.incorporate_new_buffer(attempt) def block_program(description): """Mini-DSL for block rewriting. A sequence of commands that will be run over all contiguous sequences of blocks of the description length in order. Commands are: * ".", keep this block unchanged * "-", subtract one from this block. * "0", replace this block with zero * "X", delete this block If a command does not apply (currently only because it's - on a zero block) the block will be silently skipped over. As a side effect of running a block program its score will be updated. """ name = "block_program(%r)" % (description,) if name not in SHRINK_PASS_DEFINITIONS: """Defines a shrink pass that runs the block program ``description`` at every block index.""" n = len(description) def run(self, chooser): """Adaptively attempt to run the block program at the current index. If this successfully applies the block program ``k`` times then this runs in ``O(log(k))`` test function calls.""" i = chooser.choose(hrange(len(self.shrink_target.blocks) - n)) # First, run the block program at the chosen index. If this fails, # don't do any extra work, so that failure is as cheap as possible. if not self.run_block_program(i, description, original=self.shrink_target): return # Because we run in a random order we will often find ourselves in the middle # of a region where we could run the block program. 
We thus start by moving # left to the beginning of that region if possible in order to to start from # the beginning of that region. def offset_left(k): return i - k * n i = offset_left( find_integer( lambda k: self.run_block_program( offset_left(k), description, original=self.shrink_target ) ) ) original = self.shrink_target # Now try to run the block program multiple times here. find_integer( lambda k: self.run_block_program( i, description, original=original, repeats=k ) ) run.__name__ = name defines_shrink_pass()(run) assert name in SHRINK_PASS_DEFINITIONS return name @attr.s(slots=True, cmp=False) class ShrinkPass(object): run_with_chooser = attr.ib() index = attr.ib() shrinker = attr.ib() next_prefix = attr.ib(default=()) fixed_point_at = attr.ib(default=None) successes = attr.ib(default=0) calls = attr.ib(default=0) shrinks = attr.ib(default=0) deletions = attr.ib(default=0) def step(self): if self.fixed_point_at is self.shrinker.shrink_target: return False tree = self.shrinker.shrink_pass_choice_trees[self] if tree.exhausted: return False initial_shrinks = self.shrinker.shrinks initial_calls = self.shrinker.calls size = len(self.shrinker.shrink_target.buffer) self.shrinker.explain_next_call_as(self.name) try: self.next_prefix = tree.step( self.next_prefix, lambda chooser: self.run_with_chooser(self.shrinker, chooser), ) finally: self.calls += self.shrinker.calls - initial_calls self.shrinks += self.shrinker.shrinks - initial_shrinks self.deletions += size - len(self.shrinker.shrink_target.buffer) self.shrinker.clear_call_explanation() return True @property def name(self): return self.run_with_chooser.__name__ def non_zero_suffix(b): """Returns the longest suffix of b that starts with a non-zero byte.""" i = 0 while i < len(b) and b[i] == 0: i += 1 return b[i:] def expand_region(f, a, b): """Attempts to find u, v with u <= a, v >= b such that f(u, v) is true. Assumes that f(a, b) is already true. 
""" b += find_integer(lambda k: f(a, b + k)) a -= find_integer(lambda k: f(a - k, b)) return (a, b) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/shrinking/000077500000000000000000000000001354103617500342035ustar00rootroot00000000000000__init__.py000066400000000000000000000020101354103617500362260ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/shrinking# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.internal.conjecture.shrinking.floats import Float from hypothesis.internal.conjecture.shrinking.integer import Integer from hypothesis.internal.conjecture.shrinking.lexical import Lexical from hypothesis.internal.conjecture.shrinking.ordering import Ordering __all__ = ["Lexical", "Integer", "Ordering", "Float"] common.py000066400000000000000000000155141354103617500357740ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/shrinking# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function """This module implements various useful common functions for shrinking tasks. """ def find_integer(f): """Finds a (hopefully large) integer such that f(n) is True and f(n + 1) is False. f(0) is assumed to be True and will not be checked. """ # We first do a linear scan over the small numbers and only start to do # anything intelligent if f(4) is true. This is because it's very hard to # win big when the result is small. If the result is 0 and we try 2 first # then we've done twice as much work as we needed to! for i in range(1, 5): if not f(i): return i - 1 # We now know that f(4) is true. We want to find some number for which # f(n) is *not* true. # lo is the largest number for which we know that f(lo) is true. lo = 4 # Exponential probe upwards until we find some value hi such that f(hi) # is not true. Subsequently we maintain the invariant that hi is the # smallest number for which we know that f(hi) is not true. hi = 5 while f(hi): lo = hi hi *= 2 # Now binary search until lo + 1 = hi. At that point we have f(lo) and not # f(lo + 1), as desired.. 
while lo + 1 < hi: mid = (lo + hi) // 2 if f(mid): lo = mid else: hi = mid return lo class Shrinker(object): """A Shrinker object manages a single value and a predicate it should satisfy, and attempts to improve it in some direction, making it smaller and simpler.""" def __init__( self, initial, predicate, random, full=False, debug=False, name=None, **kwargs ): self.setup(**kwargs) self.current = self.make_immutable(initial) self.initial = self.current self.random = random self.full = full self.changes = 0 self.name = name self.__predicate = predicate self.__seen = set() self.debugging_enabled = debug @property def calls(self): return len(self.__seen) def __repr__(self): return "%s(%sinitial=%r, current=%r)" % ( type(self).__name__, "" if self.name is None else "%r, " % (self.name,), self.initial, self.current, ) def setup(self, **kwargs): """Runs initial setup code. Convenience function for children that doesn't require messing with the signature of init. """ def delegate(self, other_class, convert_to, convert_from, **kwargs): """Delegates shrinking to another shrinker class, by converting the current value to and from it with provided functions.""" self.call_shrinker( other_class, convert_to(self.current), lambda v: self.consider(convert_from(v)), **kwargs ) def call_shrinker(self, other_class, initial, predicate, **kwargs): """Calls another shrinker class, passing through the relevant context variables. Note we explicitly do not pass through full. """ return other_class.shrink(initial, predicate, random=self.random, **kwargs) def debug(self, *args): if self.debugging_enabled: print("DEBUG", self, *args) @classmethod def shrink(cls, initial, predicate, **kwargs): """Shrink the value ``initial`` subject to the constraint that it satisfies ``predicate``. Returns the shrunk value. """ shrinker = cls(initial, predicate, **kwargs) shrinker.run() return shrinker.current def run(self): """Run for an appropriate number of steps to improve the current value. 
If self.full is True, will run until no further improvements can be found. """ if self.short_circuit(): return if self.full: prev = -1 while self.changes != prev: prev = self.changes self.run_step() else: self.run_step() self.debug("COMPLETE") def incorporate(self, value): """Try using ``value`` as a possible candidate improvement. Return True if it works. """ value = self.make_immutable(value) self.check_invariants(value) if not self.left_is_better(value, self.current): if value != self.current and (value == value): self.debug( "Rejected %r as worse than self.current=%r" % (value, self.current) ) return False if value in self.__seen: return False self.__seen.add(value) if self.__predicate(value): self.debug("shrinking to %r" % (value,)) self.changes += 1 self.current = value return True return False def consider(self, value): """Returns True if make_immutable(value) == self.current after calling self.incorporate(value).""" value = self.make_immutable(value) if value == self.current: return True return self.incorporate(value) def make_immutable(self, value): """Convert value into an immutable (and hashable) representation of itself. It is these immutable versions that the shrinker will work on. Defaults to just returning the value. """ return value def check_invariants(self, value): """Make appropriate assertions about the value to ensure that it is valid for this shrinker. Does nothing by default. """ raise NotImplementedError() def short_circuit(self): """Possibly attempt to do some shrinking. If this returns True, the ``run`` method will terminate early without doing any more work. 
""" raise NotImplementedError() def left_is_better(self, left, right): """Returns True if the left is strictly simpler than the right according to the standards of this shrinker.""" raise NotImplementedError() def run_step(self): """Run a single step of the main shrink loop, attempting to improve the current value.""" raise NotImplementedError() floats.py000066400000000000000000000066211354103617500357730ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/shrinking# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import math import sys from hypothesis.internal.conjecture.floats import float_to_lex from hypothesis.internal.conjecture.shrinking.common import Shrinker from hypothesis.internal.conjecture.shrinking.integer import Integer MAX_PRECISE_INTEGER = 2 ** 53 class Float(Shrinker): def setup(self): self.NAN = float("nan") self.debugging_enabled = True def make_immutable(self, f): f = float(f) if math.isnan(f): # Always use the same NAN so it works properly in self.seen f = self.NAN return f def check_invariants(self, value): # We only handle positive floats because we encode the sign separately # anyway. 
assert not (value < 0) def left_is_better(self, left, right): lex1 = float_to_lex(left) lex2 = float_to_lex(right) return lex1 < lex2 def short_circuit(self): for g in [sys.float_info.max, float("inf"), float("nan")]: self.consider(g) # If we're stuck at a nasty float don't try to shrink it further. These if math.isinf(self.current) or math.isnan(self.current): return True # If its too large to represent as an integer, bail out here. It's # better to try shrinking it in the main representation. return self.current >= MAX_PRECISE_INTEGER def run_step(self): # We check for a bunch of standard "large" floats. If we're currently # worse than them and the shrink downwards doesn't help, abort early # because there's not much useful we can do here. # Finally we get to the important bit: Each of these is a small change # to the floating point number that corresponds to a large change in # the lexical representation. Trying these ensures that our floating # point shrink can always move past these obstacles. In particular it # ensures we can always move to integer boundaries and shrink past a # change that would require shifting the exponent while not changing # the float value much. for g in [math.floor(self.current), math.ceil(self.current)]: self.consider(g) if self.consider(int(self.current)): self.debug("Just an integer now") self.delegate(Integer, convert_to=int, convert_from=float) return m, n = self.current.as_integer_ratio() i, r = divmod(m, n) # Now try to minimize the top part of the fraction as an integer. This # basically splits the float as k + x with 0 <= x < 1 and minimizes # k as an integer, but without the precision issues that would have. 
self.call_shrinker(Integer, i, lambda k: self.consider((i * n + r) / n)) integer.py000066400000000000000000000046741354103617500361460ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/shrinking# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.internal.compat import hrange from hypothesis.internal.conjecture.shrinking.common import Shrinker, find_integer """ This module implements a shrinker for non-negative integers. """ class Integer(Shrinker): """Attempts to find a smaller integer. Guaranteed things to try ``0``, ``1``, ``initial - 1``, ``initial - 2``. Plenty of optimisations beyond that but those are the guaranteed ones. """ def short_circuit(self): for i in hrange(2): if self.consider(i): return True self.mask_high_bits() if self.size > 8: # see if we can squeeze the integer into a single byte. 
self.consider(self.current >> (self.size - 8)) self.consider(self.current & 0xFF) return self.current == 2 def check_invariants(self, value): assert value >= 0 def left_is_better(self, left, right): return left < right def run_step(self): self.shift_right() self.shrink_by_multiples(2) self.shrink_by_multiples(1) def shift_right(self): base = self.current find_integer(lambda k: k <= self.size and self.consider(base >> k)) def mask_high_bits(self): base = self.current n = base.bit_length() @find_integer def try_mask(k): if k >= n: return False mask = (1 << (n - k)) - 1 return self.consider(mask & base) @property def size(self): return self.current.bit_length() def shrink_by_multiples(self, k): base = self.current @find_integer def shrunk(n): attempt = base - n * k return attempt >= 0 and self.consider(attempt) return shrunk > 0 lexical.py000066400000000000000000000041431354103617500361210ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/shrinking# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.internal.compat import hbytes, int_from_bytes, int_to_bytes from hypothesis.internal.conjecture.shrinking.common import Shrinker from hypothesis.internal.conjecture.shrinking.integer import Integer from hypothesis.internal.conjecture.shrinking.ordering import Ordering """ This module implements a lexicographic minimizer for blocks of bytes. """ class Lexical(Shrinker): def make_immutable(self, value): return hbytes(value) @property def size(self): return len(self.current) def check_invariants(self, value): assert len(value) == self.size def left_is_better(self, left, right): return left < right def incorporate_int(self, i): return self.incorporate(int_to_bytes(i, self.size)) @property def current_int(self): return int_from_bytes(self.current) def minimize_as_integer(self, full=False): Integer.shrink( self.current_int, lambda c: c == self.current_int or self.incorporate_int(c), random=self.random, full=full, ) def partial_sort(self): Ordering.shrink(self.current, self.consider, random=self.random) def short_circuit(self): """This is just an assemblage of other shrinkers, so we rely on their short circuiting.""" return False def run_step(self): self.minimize_as_integer() self.partial_sort() ordering.py000066400000000000000000000067671354103617500363270ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/conjecture/shrinking# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 
def identity(v):
    # Default key function: compare elements by their own value.
    return v


class Ordering(Shrinker):
    """A shrinker that tries to make a sequence more sorted.

    Will not change the length or the contents, only tries to reorder the
    elements of the sequence.
    """

    def setup(self, key=identity):
        # key: as for sorted() - elements are ordered by key(element).
        self.key = key

    def make_immutable(self, value):
        return tuple(value)

    def short_circuit(self):
        # If we can flat out sort the target then there's nothing more to do.
        return self.consider(sorted(self.current, key=self.key))

    def left_is_better(self, left, right):
        # Lexicographic comparison of the key-mapped sequences.
        return tuple(map(self.key, left)) < tuple(map(self.key, right))

    def check_invariants(self, value):
        # Reordering only: same length, same multiset of elements.
        assert len(value) == len(self.current)
        assert sorted(value) == sorted(self.current)

    def run_step(self):
        self.sort_regions()
        self.sort_regions_with_gaps()

    def sort_regions(self):
        """Guarantees that for each i we have tried to swap index i with
        index i + 1.

        This uses an adaptive algorithm that works by sorting contiguous
        regions starting from each element.
        """
        i = 0
        while i + 1 < len(self.current):
            prefix = list(self.current[:i])
            # find_integer expands k exponentially, so a long sortable run is
            # found in O(log n) predicate calls rather than one at a time.
            k = find_integer(
                lambda k: i + k <= len(self.current)
                and self.consider(
                    prefix
                    + sorted(self.current[i : i + k], key=self.key)
                    + list(self.current[i + k :])
                )
            )
            i += k

    def sort_regions_with_gaps(self):
        """Guarantees that for each i we have tried to swap index i with
        index i + 2.

        This uses an adaptive algorithm that works by sorting contiguous
        regions centered on each element, where that element is treated as
        fixed and the elements around it are sorted.
        """
        for i in hrange(1, len(self.current) - 1):
            if self.current[i - 1] <= self.current[i] <= self.current[i + 1]:
                # Element is already locally ordered; nothing to gain here.
                continue

            def can_sort(a, b):
                # Sort current[a:b] while holding current[i] fixed in place.
                if a < 0 or b > len(self.current):
                    return False
                assert a <= i < b
                split = i - a
                values = sorted(self.current[a:i] + self.current[i + 1 : b])
                return self.consider(
                    list(self.current[:a])
                    + values[:split]
                    + [self.current[i]]
                    + values[split:]
                    + list(self.current[b:])
                )

            left = i
            right = i + 1
            # Adaptively grow the sorted window to the right, then the left.
            right += find_integer(lambda k: can_sort(left, right + k))
            find_integer(lambda k: can_sort(left - k, right))
# # END HEADER from __future__ import absolute_import, division, print_function import enum import hashlib import heapq from collections import OrderedDict from fractions import Fraction from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import ( abc, bit_length, floor, hrange, int_from_bytes, qualname, str_to_bytes, ) from hypothesis.internal.floats import int_to_float LABEL_MASK = 2 ** 64 - 1 def calc_label_from_name(name): hashed = hashlib.sha384(str_to_bytes(name)).digest() return int_from_bytes(hashed[:8]) def calc_label_from_cls(cls): return calc_label_from_name(qualname(cls)) def combine_labels(*labels): label = 0 for l in labels: label = (label << 1) & LABEL_MASK label ^= l return label INTEGER_RANGE_DRAW_LABEL = calc_label_from_name("another draw in integer_range()") BIASED_COIN_LABEL = calc_label_from_name("biased_coin()") SAMPLE_IN_SAMPLER_LABLE = calc_label_from_name("a sample() in Sampler") ONE_FROM_MANY_LABEL = calc_label_from_name("one more from many()") def integer_range(data, lower, upper, center=None): assert lower <= upper if lower == upper: # Write a value even when this is trival so that when a bound depends # on other values we don't suddenly disappear when the gap shrinks to # zero - if that happens then often the data stream becomes misaligned # and we fail to shrink in cases where we really should be able to. data.draw_bits(1, forced=0) return int(lower) if center is None: center = lower center = min(max(center, lower), upper) if center == upper: above = False elif center == lower: above = True else: above = boolean(data) if above: gap = upper - center else: gap = center - lower assert gap > 0 bits = bit_length(gap) probe = gap + 1 if bits > 24 and data.draw_bits(3): # For large ranges, we combine the uniform random distribution from draw_bits # with the weighting scheme used by WideRangeIntStrategy with moderate chance. # Cutoff at 2 ** 24 so unicode choice is uniform but 32bit distribution is not. 
idx = Sampler([4.0, 8.0, 1.0, 1.0, 0.5]).sample(data) sizes = [8, 16, 32, 64, 128] bits = min(bits, sizes[idx]) while probe > gap: data.start_example(INTEGER_RANGE_DRAW_LABEL) probe = data.draw_bits(bits) data.stop_example(discard=probe > gap) if above: result = center + probe else: result = center - probe assert lower <= result <= upper return int(result) try: from numpy import ndarray except ImportError: # pragma: no cover ndarray = () def check_sample(values, strategy_name): if isinstance(values, ndarray): if values.ndim != 1: raise InvalidArgument( ( "Only one-dimensional arrays are supported for sampling, " "and the given value has {ndim} dimensions (shape " "{shape}). This array would give samples of array slices " "instead of elements! Use np.ravel(values) to convert " "to a one-dimensional array, or tuple(values) if you " "want to sample slices." ).format(ndim=values.ndim, shape=values.shape) ) elif not isinstance(values, (OrderedDict, abc.Sequence, enum.EnumMeta)): raise InvalidArgument( "Cannot sample from {values}, not an ordered collection. " "Hypothesis goes to some length to ensure that the {strategy} " "strategy has stable results between runs. To replay a saved " "example, the sampled values must have the same iteration order " "on every run - ruling out sets, dicts, etc due to hash " "randomisation. 
def biased_coin(data, p):
    """Return True with probability ``p`` (assuming a uniform generator),
    shrinking towards False.

    ``data`` is the ConjectureData stream being drawn from; ``p`` may be any
    real number (values <= 0 or >= 1 make the result certain but still
    consume a draw so the stream stays aligned during shrinking).
    """
    data.start_example(BIASED_COIN_LABEL)
    while True:
        # The logic here is a bit complicated and special cased to make it
        # play better with the shrinker.

        # We imagine partitioning the real interval [0, 1] into 256 equal parts
        # and looking at each part and whether its interior is wholly <= p
        # or wholly >= p. At most one part can be neither.

        # We then pick a random part. If it's wholly on one side or the other
        # of p then we use that as the answer. If p is contained in the
        # interval then we start again with a new probability that is given
        # by the fraction of that interval that was <= our previous p.

        # We then take advantage of the fact that we have control of the
        # labelling to make this shrink better, using the following tricks:

        # If p is <= 0 or >= 1 the result of this coin is certain. We make sure
        # to write a byte to the data stream anyway so that these don't cause
        # difficulties when shrinking.
        if p <= 0:
            data.draw_bits(1, forced=0)
            result = False
        elif p >= 1:
            data.draw_bits(1, forced=1)
            result = True
        else:
            falsey = floor(256 * (1 - p))
            truthy = floor(256 * p)
            remainder = 256 * p - truthy

            if falsey + truthy == 256:
                # p is an exact multiple of 1/256, so we can draw the minimal
                # number of bits instead of a whole byte. (If p is a float,
                # as_integer_ratio() gives a power-of-two denominator.)
                if isinstance(p, Fraction):
                    m = p.numerator
                    n = p.denominator
                else:
                    m, n = p.as_integer_ratio()
                assert n & (n - 1) == 0, n  # n is a power of 2
                assert n > m > 0
                truthy = m
                falsey = n - m
                bits = bit_length(n) - 1
                partial = False
            else:
                bits = 8
                partial = True

            i = data.draw_bits(bits)

            # We always label the region that causes us to repeat the loop as
            # 255 so that shrinking this byte never causes us to need to draw
            # more data.
            if partial and i == 255:
                p = remainder
                continue
            if falsey == 0:
                # Every other partition is truthy, so the result is true
                result = True
            elif truthy == 0:
                # Every other partition is falsey, so the result is false
                result = False
            elif i <= 1:
                # We special case so that zero is always false and 1 is always
                # true which makes shrinking easier because we can always
                # replace a truthy block with 1. This has the slightly weird
                # property that shrinking from 2 to 1 can cause the result to
                # grow, but the shrinker always tries 0 and 1 first anyway, so
                # this will usually be fine.
                result = bool(i)
            else:
                # Originally everything in the region 0 <= i < falsey was false
                # and everything above was true. We swapped one truthy element
                # into this region, so the region becomes 0 <= i <= falsey
                # except for i = 1. We know i > 1 here, so the test for truth
                # becomes i > falsey.
                result = i > falsey
        break
    data.stop_example()
    return result
The triples are chosen so that the resulting mixture has the right distribution. We maintain the following invariants to try to produce good shrinks: 1. The table is in lexicographic (base, alternate) order, so that choosing an earlier value in the list always lowers (or at least leaves unchanged) the value. 2. base[i] < alternate[i], so that shrinking the draw always results in shrinking the chosen element. """ def __init__(self, weights): n = len(weights) self.table = [[i, None, None] for i in hrange(n)] total = sum(weights) num_type = type(total) zero = num_type(0) one = num_type(1) small = [] large = [] probabilities = [w / total for w in weights] scaled_probabilities = [] for i, p in enumerate(probabilities): scaled = p * n scaled_probabilities.append(scaled) if scaled == 1: self.table[i][2] = zero elif scaled < 1: small.append(i) else: large.append(i) heapq.heapify(small) heapq.heapify(large) while small and large: lo = heapq.heappop(small) hi = heapq.heappop(large) assert lo != hi assert scaled_probabilities[hi] > one assert self.table[lo][1] is None self.table[lo][1] = hi self.table[lo][2] = one - scaled_probabilities[lo] scaled_probabilities[hi] = ( scaled_probabilities[hi] + scaled_probabilities[lo] ) - one if scaled_probabilities[hi] < 1: heapq.heappush(small, hi) elif scaled_probabilities[hi] == 1: self.table[hi][2] = zero else: heapq.heappush(large, hi) while large: self.table[large.pop()][2] = zero while small: self.table[small.pop()][2] = zero for entry in self.table: assert entry[2] is not None if entry[1] is None: entry[1] = entry[0] elif entry[1] < entry[0]: entry[0], entry[1] = entry[1], entry[0] entry[2] = one - entry[2] self.table.sort() def sample(self, data): data.start_example(SAMPLE_IN_SAMPLER_LABLE) i = integer_range(data, 0, len(self.table) - 1) base, alternate, alternate_chance = self.table[i] use_alternate = biased_coin(data, alternate_chance) data.stop_example() if use_alternate: return alternate else: return base class many(object): 
"""Utility class for collections. Bundles up the logic we use for "should I keep drawing more values?" and handles starting and stopping examples in the right place. Intended usage is something like: elements = many(data, ...) while elements.more(): add_stuff_to_result() """ def __init__(self, data, min_size, max_size, average_size): assert 0 <= min_size <= average_size <= max_size self.min_size = min_size self.max_size = max_size self.data = data self.stopping_value = 1 - 1.0 / (1 + average_size) self.count = 0 self.rejections = 0 self.drawn = False self.force_stop = False self.rejected = False def more(self): """Should I draw another element to add to the collection?""" if self.drawn: self.data.stop_example(discard=self.rejected) self.drawn = True self.rejected = False self.data.start_example(ONE_FROM_MANY_LABEL) if self.min_size == self.max_size: should_continue = self.count < self.min_size elif self.force_stop: should_continue = False else: if self.count < self.min_size: p_continue = 1.0 elif self.count >= self.max_size: p_continue = 0.0 else: p_continue = self.stopping_value should_continue = biased_coin(self.data, p_continue) if should_continue: self.count += 1 return True else: self.data.stop_example() return False def reject(self): """Reject the last example (i.e. don't count it towards our budget of elements because it's not going to go in the final collection).""" assert self.count > 0 self.count -= 1 self.rejections += 1 self.rejected = True if self.rejections > 2 * self.count: if self.count < self.min_size: self.data.mark_invalid() else: self.force_stop = True hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/coverage.py000066400000000000000000000071301354103617500322140ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import json import os import sys from contextlib import contextmanager from hypothesis.internal.reflection import proxies if False: from typing import Set, Dict, Tuple # noqa """ This module implements a custom coverage system that records conditions and then validates that every condition has been seen to be both True and False during the execution of our tests. The only thing we use it for at present is our argument validation functions, where we assert that every validation function has been seen to both pass and fail in the course of testing. When not running with a magic environment variable set, this module disables itself and has essentially no overhead. """ pretty_file_name_cache = {} # type: Dict[str, str] def pretty_file_name(f): try: return pretty_file_name_cache[f] except KeyError: pass parts = f.split(os.path.sep) if "hypothesis" in parts: parts = parts[-parts[::-1].index("hypothesis") :] result = os.path.sep.join(parts) pretty_file_name_cache[f] = result return result IN_COVERAGE_TESTS = os.getenv("HYPOTHESIS_INTERNAL_COVERAGE") == "true" if IN_COVERAGE_TESTS: # By this point, "branch-check" should have already been deleted by the # tox config. We can't delete it here because of #1718. 
written = set() # type: Set[Tuple[str, bool]] def record_branch(name, value): key = (name, value) if key in written: return written.add(key) with open("branch-check", "a") as log: log.write(json.dumps({"name": name, "value": value}) + "\n") description_stack = [] @contextmanager def check_block(name, depth): # We add an extra two callers to the stack: One for the contextmanager # function, one for our actual caller, so we want to go two extra # stack frames up. caller = sys._getframe(depth + 2) local_description = "%s at %s:%d" % ( name, pretty_file_name(caller.f_code.co_filename), caller.f_lineno, ) try: description_stack.append(local_description) description = " in ".join(reversed(description_stack)) + " passed" yield record_branch(description, True) except BaseException: record_branch(description, False) raise finally: description_stack.pop() @contextmanager def check(name): with check_block(name, 2): yield def check_function(f): @proxies(f) def accept(*args, **kwargs): # depth of 2 because of the proxy function calling us. with check_block(f.__name__, 2): return f(*args, **kwargs) return accept else: def check_function(f): return f # Mypy bug: https://github.com/python/mypy/issues/4117 @contextmanager # type: ignore def check(name): yield hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/detection.py000066400000000000000000000016071354103617500324020ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from types import MethodType def is_hypothesis_test(test): if isinstance(test, MethodType): return is_hypothesis_test(test.__func__) return getattr(test, "is_hypothesis_test", False) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/entropy.py000066400000000000000000000070751354103617500321310ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import contextlib import random from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import integer_types RANDOMS_TO_MANAGE = [random] # type: list try: import numpy.random as npr except ImportError: pass else: class NumpyRandomWrapper(object): """A shim to remove those darn underscores.""" seed = npr.seed getstate = npr.get_state setstate = npr.set_state RANDOMS_TO_MANAGE.append(NumpyRandomWrapper) def register_random(r): # type: (random.Random) -> None """Register the given Random instance for management by Hypothesis. 
You can pass ``random.Random`` instances (or other objects with seed, getstate, and setstate methods) to ``register_random(r)`` to have their states seeded and restored in the same way as the global PRNGs from the ``random`` and ``numpy.random`` modules. All global PRNGs, from e.g. simulation or scheduling frameworks, should be registered to prevent flaky tests. Hypothesis will ensure that the PRNG state is consistent for all test runs, or reproducibly varied if you choose to use the :func:`~hypothesis.strategies.random_module` strategy. """ if not (hasattr(r, "seed") and hasattr(r, "getstate") and hasattr(r, "setstate")): raise InvalidArgument("r=%r does not have all the required methods" % (r,)) if r not in RANDOMS_TO_MANAGE: RANDOMS_TO_MANAGE.append(r) def get_seeder_and_restorer(seed=0): """Return a pair of functions which respectively seed all and restore the state of all registered PRNGs. This is used by the core engine via `deterministic_PRNG`, and by users via `register_random`. We support registration of additional random.Random instances (or other objects with seed, getstate, and setstate methods) to force determinism on simulation or scheduling frameworks which avoid using the global random state. See e.g. #1709. """ assert isinstance(seed, integer_types) and 0 <= seed < 2 ** 32 states = [] # type: list def seed_all(): assert not states for r in RANDOMS_TO_MANAGE: states.append(r.getstate()) r.seed(seed) def restore_all(): assert len(states) == len(RANDOMS_TO_MANAGE) for r, state in zip(RANDOMS_TO_MANAGE, states): r.setstate(state) del states[:] return seed_all, restore_all @contextlib.contextmanager def deterministic_PRNG(): """Context manager that handles random.seed without polluting global state. See issue #1255 and PR #1295 for details and motivation - in short, leaving the global pseudo-random number generator (PRNG) seeded is a very bad idea in principle, and breaks all kinds of independence assumptions in practice. 
""" seed_all, restore_all = get_seeder_and_restorer() seed_all() try: yield finally: restore_all() hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/escalation.py000066400000000000000000000064461354103617500325540ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import os import sys import traceback from inspect import getframeinfo import hypothesis from hypothesis.errors import ( DeadlineExceeded, HypothesisException, MultipleFailures, StopTest, UnsatisfiedAssumption, ) from hypothesis.internal.compat import binary_type, encoded_filepath, text_type if False: from typing import Dict # noqa def belongs_to(package): root = os.path.dirname(package.__file__) cache = {text_type: {}, binary_type: {}} def accept(filepath): ftype = type(filepath) try: return cache[ftype][filepath] except KeyError: pass new_filepath = encoded_filepath(filepath) result = os.path.abspath(new_filepath).startswith(root) cache[ftype][filepath] = result cache[type(new_filepath)][new_filepath] = result return result accept.__name__ = "is_%s_file" % (package.__name__,) return accept PREVENT_ESCALATION = os.getenv("HYPOTHESIS_DO_NOT_ESCALATE") == "true" FILE_CACHE = {} # type: Dict[bytes, bool] is_hypothesis_file = belongs_to(hypothesis) HYPOTHESIS_CONTROL_EXCEPTIONS = (DeadlineExceeded, StopTest, 
UnsatisfiedAssumption) def mark_for_escalation(e): if not isinstance(e, HYPOTHESIS_CONTROL_EXCEPTIONS): e.hypothesis_internal_always_escalate = True def escalate_hypothesis_internal_error(): if PREVENT_ESCALATION: return error_type, e, tb = sys.exc_info() if getattr(e, "hypothesis_internal_always_escalate", False): raise filepath = traceback.extract_tb(tb)[-1][0] if is_hypothesis_file(filepath) and not isinstance( e, (HypothesisException,) + HYPOTHESIS_CONTROL_EXCEPTIONS ): raise def get_trimmed_traceback(): """Return the current traceback, minus any frames added by Hypothesis.""" error_type, _, tb = sys.exc_info() # Avoid trimming the traceback if we're in verbose mode, or the error # was raised inside Hypothesis (and is not a MultipleFailures) if hypothesis.settings.default.verbosity >= hypothesis.Verbosity.debug or ( is_hypothesis_file(traceback.extract_tb(tb)[-1][0]) and not isinstance(error_type, MultipleFailures) ): return tb while tb is not None and ( # If the frame is from one of our files, it's been added by Hypothesis. is_hypothesis_file(getframeinfo(tb.tb_frame)[0]) # But our `@proxies` decorator overrides the source location, # so we check for an attribute it injects into the frame too. or tb.tb_frame.f_globals.get("__hypothesistracebackhide__") is True ): tb = tb.tb_next return tb hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/floats.py000066400000000000000000000100521354103617500317060ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import math from hypothesis.internal.compat import ( CAN_PACK_HALF_FLOAT, quiet_raise, struct_pack, struct_unpack, ) try: import numpy except (ImportError, TypeError): # pragma: no cover # We catch TypeError because that can be raised if Numpy is installed on # PyPy for Python 2.7; and we only need a workaround until 2020-01-01. numpy = None # Format codes for (int, float) sized types, used for byte-wise casts. # See https://docs.python.org/3/library/struct.html#format-characters STRUCT_FORMATS = { 16: (b"!H", b"!e"), # Note: 'e' is new in Python 3.6, so we have helpers 32: (b"!I", b"!f"), 64: (b"!Q", b"!d"), } # There are two versions of this: the one that uses Numpy to support Python # 3.5 and earlier, and the elegant one for new versions. We use the new # one if Numpy is unavailable too, because it's slightly faster in all cases. 
if numpy and not CAN_PACK_HALF_FLOAT: # pragma: no cover def reinterpret_bits(x, from_, to): if from_ == b"!e": arr = numpy.array([x], dtype=">f2") if numpy.isfinite(x) and not numpy.isfinite(arr[0]): quiet_raise(OverflowError("%r too large for float16" % (x,))) buf = arr.tobytes() else: buf = struct_pack(from_, x) if to == b"!e": return float(numpy.frombuffer(buf, dtype=">f2")[0]) return struct_unpack(to, buf)[0] else: def reinterpret_bits(x, from_, to): return struct_unpack(to, struct_pack(from_, x))[0] def float_of(x, width): assert width in (16, 32, 64) if width == 64: return float(x) elif width == 32: return reinterpret_bits(float(x), b"!f", b"!f") else: return reinterpret_bits(float(x), b"!e", b"!e") def sign(x): try: return math.copysign(1.0, x) except TypeError: raise TypeError("Expected float but got %r of type %s" % (x, type(x).__name__)) def is_negative(x): return sign(x) < 0 def count_between_floats(x, y, width=64): assert x <= y if is_negative(x): if is_negative(y): return float_to_int(x, width) - float_to_int(y, width) + 1 else: return count_between_floats(x, -0.0, width) + count_between_floats( 0.0, y, width ) else: assert not is_negative(y) return float_to_int(y, width) - float_to_int(x, width) + 1 def float_to_int(value, width=64): fmt_int, fmt_flt = STRUCT_FORMATS[width] return reinterpret_bits(value, fmt_flt, fmt_int) def int_to_float(value, width=64): fmt_int, fmt_flt = STRUCT_FORMATS[width] return reinterpret_bits(value, fmt_int, fmt_flt) def next_up(value, width=64): """Return the first float larger than finite `val` - IEEE 754's `nextUp`. From https://stackoverflow.com/a/10426033, with thanks to Mark Dickinson. 
""" assert isinstance(value, float) if math.isnan(value) or (math.isinf(value) and value > 0): return value if value == 0.0 and is_negative(value): return 0.0 fmt_int, fmt_flt = STRUCT_FORMATS[width] # Note: n is signed; float_to_int returns unsigned fmt_int = fmt_int.lower() n = reinterpret_bits(value, fmt_flt, fmt_int) if n >= 0: n += 1 else: n -= 1 return reinterpret_bits(n, fmt_int, fmt_flt) def next_down(value, width=64): return -next_up(-value, width) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/healthcheck.py000066400000000000000000000025611354103617500326670ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.errors import FailedHealthCheck def fail_health_check(settings, message, label): # Tell pytest to omit the body of this function from tracebacks # https://docs.pytest.org/en/latest/example/simple.html#writing-well-integrated-assertion-helpers __tracebackhide__ = True if label in settings.suppress_health_check: return message += ( "\nSee https://hypothesis.readthedocs.io/en/latest/health" "checks.html for more information about this. " "If you want to disable just this health check, add %s " "to the suppress_health_check settings for this test." 
) % (label,) raise FailedHealthCheck(message, label) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/intervalsets.py000066400000000000000000000050331354103617500331440ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function class IntervalSet(object): def __init__(self, intervals): self.intervals = tuple(intervals) self.offsets = [0] for u, v in self.intervals: self.offsets.append(self.offsets[-1] + v - u + 1) self.size = self.offsets.pop() def __len__(self): return self.size def __iter__(self): for u, v in self.intervals: for i in range(u, v + 1): yield i def __getitem__(self, i): if i < 0: i = self.size + i if i < 0 or i >= self.size: raise IndexError("Invalid index %d for [0, %d)" % (i, self.size)) # Want j = maximal such that offsets[j] <= i j = len(self.intervals) - 1 if self.offsets[j] > i: hi = j lo = 0 # Invariant: offsets[lo] <= i < offsets[hi] while lo + 1 < hi: mid = (lo + hi) // 2 if self.offsets[mid] <= i: lo = mid else: hi = mid j = lo t = i - self.offsets[j] u, v = self.intervals[j] r = u + t assert r <= v return r def __repr__(self): return "IntervalSet(%r)" % (self.intervals,) def index(self, value): for offset, (u, v) in zip(self.offsets, self.intervals): if u == value: return offset elif u > value: raise ValueError("%d is not in list" % (value,)) if value <= 
v: return offset + (value - u) raise ValueError("%d is not in list" % (value,)) def index_above(self, value): for offset, (u, v) in zip(self.offsets, self.intervals): if u >= value: return offset if value <= v: return offset + (value - u) return self.size hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/lazyformat.py000066400000000000000000000024601354103617500326120ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function class lazyformat(object): """A format string that isn't evaluated until it's needed.""" def __init__(self, format_string, *args): self.__format_string = format_string self.__args = args def __str__(self): return self.__format_string % self.__args def __eq__(self, other): return ( isinstance(other, lazyformat) and self.__format_string == other.__format_string and self.__args == other.__args ) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(self.__format_string) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/reflection.py000066400000000000000000000505151354103617500325600ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """This file can approximately be considered the collection of hypothesis going to really unreasonable lengths to produce pretty output.""" from __future__ import absolute_import, division, print_function import ast import hashlib import inspect import re import tokenize import types import uuid from functools import wraps from types import ModuleType from hypothesis.configuration import storage_directory from hypothesis.internal.compat import ( ARG_NAME_ATTRIBUTE, getfullargspec, hrange, isidentifier, qualname, str_to_bytes, to_str, to_unicode, update_code_location, ) from hypothesis.vendor.pretty import pretty def fully_qualified_name(f): """Returns a unique identifier for f pointing to the module it was defined on, and an containing functions.""" if f.__module__ is not None: return f.__module__ + "." + qualname(f) else: return qualname(f) def is_mock(obj): """Determine if the given argument is a mock type. We want to be able to detect these when dealing with various test args. As they are sneaky and can look like almost anything else, we'll check this by looking for random attributes. This is more robust than looking for types. """ for _ in range(10): if not hasattr(obj, str(uuid.uuid4())): return False return True def function_digest(function): """Returns a string that is stable across multiple invocations across multiple processes and is prone to changing significantly in response to minor changes to the function. No guarantee of uniqueness though it usually will be. 
""" hasher = hashlib.md5() try: hasher.update(to_unicode(inspect.getsource(function)).encode("utf-8")) # Different errors on different versions of python. What fun. except (OSError, IOError, TypeError): pass try: hasher.update(str_to_bytes(function.__name__)) except AttributeError: pass try: hasher.update(function.__module__.__name__.encode("utf-8")) except AttributeError: pass try: hasher.update(str_to_bytes(repr(getfullargspec(function)))) except TypeError: pass try: hasher.update(function._hypothesis_internal_add_digest) except AttributeError: pass return hasher.digest() def is_typed_named_tuple(cls): """Return True if cls is probably a subtype of `typing.NamedTuple`. Unfortunately types created with `class T(NamedTuple):` actually subclass `tuple` directly rather than NamedTuple. This is annoying, and means we just have to hope that nobody defines a different tuple subclass with similar attributes. """ return ( issubclass(cls, tuple) and hasattr(cls, "_fields") and hasattr(cls, "_field_types") ) def required_args(target, args=(), kwargs=()): """Return a set of names of required args to target that were not supplied in args or kwargs. This is used in builds() to determine which arguments to attempt to fill from type hints. target may be any callable (including classes and bound methods). args and kwargs should be as they are passed to builds() - that is, a tuple of values and a dict of names: values. 
""" # We start with a workaround for NamedTuples, which don't have nice inits if inspect.isclass(target) and is_typed_named_tuple(target): provided = set(kwargs) | set(target._fields[: len(args)]) return set(target._fields) - provided # Then we try to do the right thing with getfullargspec try: spec = getfullargspec( getattr(target, "__init__", target) if inspect.isclass(target) else target ) except TypeError: # pragma: no cover return None # self appears in the argspec of __init__ and bound methods, but it's an # error to explicitly supply it - so we might skip the first argument. skip_self = int(inspect.isclass(target) or inspect.ismethod(target)) # Start with the args that were not supplied and all kwonly arguments, # then remove all positional arguments with default values, and finally # remove kwonly defaults and any supplied keyword arguments return ( set(spec.args[skip_self + len(args) :] + spec.kwonlyargs) - set(spec.args[len(spec.args) - len(spec.defaults or ()) :]) - set(spec.kwonlydefaults or ()) - set(kwargs) ) def convert_keyword_arguments(function, args, kwargs): """Returns a pair of a tuple and a dictionary which would be equivalent passed as positional and keyword args to the function. Unless function has. **kwargs the dictionary will always be empty. 
""" argspec = getfullargspec(function) new_args = [] kwargs = dict(kwargs) defaults = dict(argspec.kwonlydefaults or {}) if argspec.defaults: for name, value in zip( argspec.args[-len(argspec.defaults) :], argspec.defaults ): defaults[name] = value n = max(len(args), len(argspec.args)) for i in hrange(n): if i < len(args): new_args.append(args[i]) else: arg_name = argspec.args[i] if arg_name in kwargs: new_args.append(kwargs.pop(arg_name)) elif arg_name in defaults: new_args.append(defaults[arg_name]) else: raise TypeError("No value provided for argument %r" % (arg_name)) if kwargs and not (argspec.varkw or argspec.kwonlyargs): if len(kwargs) > 1: raise TypeError( "%s() got unexpected keyword arguments %s" % (function.__name__, ", ".join(map(repr, kwargs))) ) else: bad_kwarg = next(iter(kwargs)) raise TypeError( "%s() got an unexpected keyword argument %r" % (function.__name__, bad_kwarg) ) return tuple(new_args), kwargs def convert_positional_arguments(function, args, kwargs): """Return a tuple (new_args, new_kwargs) where all possible arguments have been moved to kwargs. new_args will only be non-empty if function has a variadic argument. 
""" argspec = getfullargspec(function) new_kwargs = dict(argspec.kwonlydefaults or {}) new_kwargs.update(kwargs) if not argspec.varkw: for k in new_kwargs.keys(): if k not in argspec.args and k not in argspec.kwonlyargs: raise TypeError( "%s() got an unexpected keyword argument %r" % (function.__name__, k) ) if len(args) < len(argspec.args): for i in hrange(len(args), len(argspec.args) - len(argspec.defaults or ())): if argspec.args[i] not in kwargs: raise TypeError( "No value provided for argument %s" % (argspec.args[i],) ) for kw in argspec.kwonlyargs: if kw not in new_kwargs: raise TypeError("No value provided for argument %s" % kw) if len(args) > len(argspec.args) and not argspec.varargs: raise TypeError( "%s() takes at most %d positional arguments (%d given)" % (function.__name__, len(argspec.args), len(args)) ) for arg, name in zip(args, argspec.args): if name in new_kwargs: raise TypeError( "%s() got multiple values for keyword argument %r" % (function.__name__, name) ) else: new_kwargs[name] = arg return (tuple(args[len(argspec.args) :]), new_kwargs) def extract_all_lambdas(tree): lambdas = [] class Visitor(ast.NodeVisitor): def visit_Lambda(self, node): lambdas.append(node) Visitor().visit(tree) return lambdas def args_for_lambda_ast(l): return [getattr(n, ARG_NAME_ATTRIBUTE) for n in l.args.args] LINE_CONTINUATION = re.compile(r"\\\n") WHITESPACE = re.compile(r"\s+") PROBABLY_A_COMMENT = re.compile("""#[^'"]*$""") SPACE_FOLLOWS_OPEN_BRACKET = re.compile(r"\( ") SPACE_PRECEDES_CLOSE_BRACKET = re.compile(r" \)") def extract_lambda_source(f): """Extracts a single lambda expression from the string source. Returns a string indicating an unknown body if it gets confused in any way. This is not a good function and I am sorry for it. Forgive me my sins, oh lord """ argspec = getfullargspec(f) arg_strings = [] # In Python 2 you can have destructuring arguments to functions. This # results in an argspec with non-string values. 
I'm not very interested in # handling these properly, but it's important to not crash on them. bad_lambda = False for a in argspec.args: if isinstance(a, (tuple, list)): # pragma: no cover arg_strings.append("(%s)" % (", ".join(a),)) bad_lambda = True else: assert isinstance(a, str) arg_strings.append(a) if argspec.varargs: arg_strings.append("*" + argspec.varargs) elif argspec.kwonlyargs: arg_strings.append("*") for a in argspec.kwonlyargs or []: default = (argspec.kwonlydefaults or {}).get(a) if default: arg_strings.append("{}={}".format(a, default)) else: arg_strings.append(a) if arg_strings: if_confused = "lambda %s: " % (", ".join(arg_strings),) else: if_confused = "lambda: " if bad_lambda: # pragma: no cover return if_confused try: source = inspect.getsource(f) except IOError: return if_confused source = LINE_CONTINUATION.sub(" ", source) source = WHITESPACE.sub(" ", source) source = source.strip() assert "lambda" in source tree = None try: tree = ast.parse(source) except SyntaxError: for i in hrange(len(source) - 1, len("lambda"), -1): prefix = source[:i] if "lambda" not in prefix: break try: tree = ast.parse(prefix) source = prefix break except SyntaxError: continue if tree is None: if source.startswith("@"): # This will always eventually find a valid expression because # the decorator must be a valid Python function call, so will # eventually be syntactically valid and break out of the loop. Thus # this loop can never terminate normally, so a no branch pragma is # appropriate. 
for i in hrange(len(source) + 1): # pragma: no branch p = source[1:i] if "lambda" in p: try: tree = ast.parse(p) source = p break except SyntaxError: pass if tree is None: return if_confused all_lambdas = extract_all_lambdas(tree) aligned_lambdas = [l for l in all_lambdas if args_for_lambda_ast(l) == argspec.args] if len(aligned_lambdas) != 1: return if_confused lambda_ast = aligned_lambdas[0] assert lambda_ast.lineno == 1 # If the source code contains Unicode characters, the bytes of the original # file don't line up with the string indexes, and `col_offset` doesn't match # the string we're using. We need to convert the source code into bytes # before slicing. # # Under the hood, the inspect module is using `tokenize.detect_encoding` to # detect the encoding of the original source file. We'll use the same # approach to get the source code as bytes. # # See https://github.com/HypothesisWorks/hypothesis/issues/1700 for an # example of what happens if you don't correct for this. # # Note: if the code doesn't come from a file (but, for example, a doctest), # `getsourcefile` will return `None` and the `open()` call will fail with # an OSError. Or if `f` is a built-in function, in which case we get a # TypeError. In both cases, fall back to splitting the Unicode string. # It's not perfect, but it's the best we can do. # # Note 2: You can only detect the encoding with `tokenize.detect_encoding` # in Python 3.2 or later. But that's okay, because the only version that # affects for us is Python 2.7, and 2.7 doesn't support non-ASCII identifiers: # https://www.python.org/dev/peps/pep-3131/. In this case we'll get an # AttributeError. 
# try: with open(inspect.getsourcefile(f), "rb") as src_f: encoding, _ = tokenize.detect_encoding(src_f.readline) source_bytes = source.encode(encoding) source_bytes = source_bytes[lambda_ast.col_offset :].strip() source = source_bytes.decode(encoding) except (AttributeError, OSError, TypeError): source = source[lambda_ast.col_offset :].strip() # This ValueError can be thrown in Python 3 if: # # - There's a Unicode character in the line before the Lambda, and # - For some reason we can't detect the source encoding of the file # # because slicing on `lambda_ast.col_offset` will account for bytes, but # the slice will be on Unicode characters. # # In practice this seems relatively rare, so we just give up rather than # trying to recover. try: source = source[source.index("lambda") :] except ValueError: return if_confused for i in hrange(len(source), len("lambda"), -1): # pragma: no branch try: parsed = ast.parse(source[:i]) assert len(parsed.body) == 1 assert parsed.body if isinstance(parsed.body[0].value, ast.Lambda): source = source[:i] break except SyntaxError: pass lines = source.split("\n") lines = [PROBABLY_A_COMMENT.sub("", l) for l in lines] source = "\n".join(lines) source = WHITESPACE.sub(" ", source) source = SPACE_FOLLOWS_OPEN_BRACKET.sub("(", source) source = SPACE_PRECEDES_CLOSE_BRACKET.sub(")", source) source = source.strip() return source def get_pretty_function_description(f): if not hasattr(f, "__name__"): return repr(f) name = f.__name__ if name == "": return extract_lambda_source(f) elif isinstance(f, types.MethodType): self = f.__self__ if not (self is None or inspect.isclass(self)): return "%r.%s" % (self, name) return name def nicerepr(v): if inspect.isfunction(v): return get_pretty_function_description(v) elif isinstance(v, type): return v.__name__ else: return to_str(pretty(v)) def arg_string(f, args, kwargs, reorder=True): if reorder: args, kwargs = convert_positional_arguments(f, args, kwargs) argspec = getfullargspec(f) bits = [] for a in 
argspec.args: if a in kwargs: bits.append("%s=%s" % (a, nicerepr(kwargs.pop(a)))) if kwargs: for a in sorted(kwargs): bits.append("%s=%s" % (a, nicerepr(kwargs[a]))) return ", ".join([nicerepr(x) for x in args] + bits) def unbind_method(f): """Take something that might be a method or a function and return the underlying function.""" return getattr(f, "im_func", getattr(f, "__func__", f)) def check_valid_identifier(identifier): if not isidentifier(identifier): raise ValueError("%r is not a valid python identifier" % (identifier,)) def eval_directory(): return storage_directory("eval_source") eval_cache = {} # type: dict def source_exec_as_module(source): try: return eval_cache[source] except KeyError: pass result = ModuleType( "hypothesis_temporary_module_%s" % (hashlib.sha1(str_to_bytes(source)).hexdigest(),) ) assert isinstance(source, str) exec(source, result.__dict__) eval_cache[source] = result return result COPY_ARGSPEC_SCRIPT = """ from hypothesis.utils.conventions import not_set def accept(%(funcname)s): def %(name)s(%(argspec)s): return %(funcname)s(%(invocation)s) return %(name)s """.lstrip() def define_function_signature(name, docstring, argspec): """A decorator which sets the name, argspec and docstring of the function passed into it.""" check_valid_identifier(name) for a in argspec.args: check_valid_identifier(a) if argspec.varargs is not None: check_valid_identifier(argspec.varargs) if argspec.varkw is not None: check_valid_identifier(argspec.varkw) n_defaults = len(argspec.defaults or ()) if n_defaults: parts = [] for a in argspec.args[:-n_defaults]: parts.append(a) for a in argspec.args[-n_defaults:]: parts.append("%s=not_set" % (a,)) else: parts = list(argspec.args) used_names = list(argspec.args) + list(argspec.kwonlyargs) used_names.append(name) for a in argspec.kwonlyargs: check_valid_identifier(a) def accept(f): fargspec = getfullargspec(f) must_pass_as_kwargs = [] invocation_parts = [] for a in argspec.args: if a not in fargspec.args and not 
fargspec.varargs: must_pass_as_kwargs.append(a) else: invocation_parts.append(a) if argspec.varargs: used_names.append(argspec.varargs) parts.append("*" + argspec.varargs) invocation_parts.append("*" + argspec.varargs) elif argspec.kwonlyargs: parts.append("*") for k in must_pass_as_kwargs: invocation_parts.append("%(k)s=%(k)s" % {"k": k}) for k in argspec.kwonlyargs: invocation_parts.append("%(k)s=%(k)s" % {"k": k}) if k in (argspec.kwonlydefaults or []): parts.append("%(k)s=not_set" % {"k": k}) else: parts.append(k) if argspec.varkw: used_names.append(argspec.varkw) parts.append("**" + argspec.varkw) invocation_parts.append("**" + argspec.varkw) candidate_names = ["f"] + [ "f_%d" % (i,) for i in hrange(1, len(used_names) + 2) ] for funcname in candidate_names: # pragma: no branch if funcname not in used_names: break base_accept = source_exec_as_module( COPY_ARGSPEC_SCRIPT % { "name": name, "funcname": funcname, "argspec": ", ".join(parts), "invocation": ", ".join(invocation_parts), } ).accept result = base_accept(f) result.__doc__ = docstring result.__defaults__ = argspec.defaults if argspec.kwonlydefaults: result.__kwdefaults__ = argspec.kwonlydefaults if argspec.annotations: result.__annotations__ = argspec.annotations return result return accept def impersonate(target): """Decorator to update the attributes of a function so that to external introspectors it will appear to be the target function. Note that this updates the function in place, it doesn't return a new one. 
""" def accept(f): f.__code__ = update_code_location( f.__code__, target.__code__.co_filename, target.__code__.co_firstlineno ) f.__name__ = target.__name__ f.__module__ = target.__module__ f.__doc__ = target.__doc__ f.__globals__["__hypothesistracebackhide__"] = True return f return accept def proxies(target): def accept(proxy): return impersonate(target)( wraps(target)( define_function_signature( target.__name__.replace("", "_lambda_"), target.__doc__, getfullargspec(target), )(proxy) ) ) return accept hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/internal/validation.py000066400000000000000000000110711354103617500325520ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import decimal import math from numbers import Rational, Real from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import integer_types from hypothesis.internal.coverage import check_function @check_function def check_type(typ, arg, name=""): if name: name += "=" if not isinstance(arg, typ): if isinstance(typ, tuple) and len(typ) == 1: typ = typ[0] if isinstance(typ, tuple): typ_string = "one of %s" % (", ".join(t.__name__ for t in typ)) else: typ_string = typ.__name__ raise InvalidArgument( "Expected %s but got %s%r (type=%s)" % (typ_string, name, arg, type(arg).__name__) ) @check_function def check_valid_integer(value): """Checks that value is either unspecified, or a valid integer. Otherwise raises InvalidArgument. """ if value is None: return check_type(integer_types, value) @check_function def check_valid_bound(value, name): """Checks that value is either unspecified, or a valid interval bound. Otherwise raises InvalidArgument. """ if value is None or isinstance(value, integer_types + (Rational,)): return if not isinstance(value, (Real, decimal.Decimal)): raise InvalidArgument("%s=%r must be a real number." % (name, value)) if math.isnan(value): raise InvalidArgument(u"Invalid end point %s=%r" % (name, value)) @check_function def check_valid_magnitude(value, name): """Checks that value is either unspecified, or a non-negative valid interval bound. Otherwise raises InvalidArgument. """ check_valid_bound(value, name) if value is not None and value < 0: raise InvalidArgument("%s=%r must not be negative." 
% (name, value)) @check_function def try_convert(typ, value, name): if value is None: return None if isinstance(value, typ): return value try: return typ(value) except (TypeError, OverflowError, ValueError, ArithmeticError): raise InvalidArgument( "Cannot convert %s=%r of type %s to type %s" % (name, value, type(value).__name__, typ.__name__) ) @check_function def check_valid_size(value, name): """Checks that value is either unspecified, or a valid non-negative size expressed as an integer/float. Otherwise raises InvalidArgument. """ if value is None: if name == "min_size": from hypothesis._settings import note_deprecation note_deprecation( "min_size=None is deprecated; use min_size=0 instead.", since="2018-10-06", ) return if isinstance(value, float): if math.isnan(value): raise InvalidArgument(u"Invalid size %s=%r" % (name, value)) from hypothesis._settings import note_deprecation note_deprecation( "Float size are deprecated: " "%s should be an integer, got %r" % (name, value), since="2018-10-11", ) else: check_type(integer_types, value, name) if value < 0: raise InvalidArgument(u"Invalid size %s=%r < 0" % (name, value)) @check_function def check_valid_interval(lower_bound, upper_bound, lower_name, upper_name): """Checks that lower_bound and upper_bound are either unspecified, or they define a valid interval on the number line. Otherwise raises InvalidArgument. 
""" if lower_bound is None or upper_bound is None: return if upper_bound < lower_bound: raise InvalidArgument( "Cannot have %s=%r < %s=%r" % (upper_name, upper_bound, lower_name, lower_bound) ) @check_function def check_valid_sizes(min_size, max_size): check_valid_size(min_size, "min_size") check_valid_size(max_size, "max_size") check_valid_interval(min_size, max_size, "min_size", "max_size") hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/provisional.py000066400000000000000000000162021354103617500311520ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """This module contains various provisional APIs and strategies. It is intended for internal use, to ease code reuse, and is not stable. Point releases may move or break the contents at any time! Internet strategies should conform to :rfc:`3986` or the authoritative definitions it links to. If not, report the bug! 
""" # https://tools.ietf.org/html/rfc3696 from __future__ import absolute_import, division, print_function import os.path import string import hypothesis._strategies as st import hypothesis.internal.conjecture.utils as cu from hypothesis.errors import InvalidArgument from hypothesis.searchstrategy.strategies import SearchStrategy if False: from typing import Text # noqa from hypothesis.searchstrategy.strategies import SearchStrategy, Ex # noqa URL_SAFE_CHARACTERS = frozenset(string.ascii_letters + string.digits + "$-_.+!*'(),") # This file is sourced from http://data.iana.org/TLD/tlds-alpha-by-domain.txt # The file contains additional information about the date that it was last updated. with open( os.path.join(os.path.dirname(__file__), "vendor", "tlds-alpha-by-domain.txt") ) as tld_file: __header = next(tld_file) assert __header.startswith("#") TOP_LEVEL_DOMAINS = sorted((line.rstrip() for line in tld_file), key=len) TOP_LEVEL_DOMAINS.insert(0, "COM") class DomainNameStrategy(SearchStrategy): @staticmethod def clean_inputs(minimum, maximum, value, variable_name): if value is None: value = maximum elif not isinstance(value, int): raise InvalidArgument( "Expected integer but %s is a %s" % (variable_name, type(value).__name__) ) elif not minimum <= value <= maximum: raise InvalidArgument( "Invalid value %r < %s=%r < %r" % (minimum, variable_name, value, maximum) ) return value def __init__(self, max_length=None, max_element_length=None): """ A strategy for :rfc:`1035` fully qualified domain names. The upper limit for max_length is 255 in accordance with :rfc:`1035#section-2.3.4` The lower limit for max_length is 4, corresponding to a two letter domain with a single letter subdomain. 
The upper limit for max_element_length is 63 in accordance with :rfc:`1035#section-2.3.4` The lower limit for max_element_length is 1 in accordance with :rfc:`1035#section-2.3.4` """ # https://tools.ietf.org/html/rfc1035#section-2.3.4 max_length = self.clean_inputs(4, 255, max_length, "max_length") max_element_length = self.clean_inputs( 1, 63, max_element_length, "max_element_length" ) super(DomainNameStrategy, self).__init__() self.max_length = max_length self.max_element_length = max_element_length # These regular expressions are constructed to match the documented # information in https://tools.ietf.org/html/rfc1035#section-2.3.1 # which defines the allowed syntax of a subdomain string. if self.max_element_length == 1: self.label_regex = r"[a-zA-Z]" elif self.max_element_length == 2: self.label_regex = r"[a-zA-Z][a-zA-Z0-9]?" else: maximum_center_character_pattern_repetitions = self.max_element_length - 2 self.label_regex = r"[a-zA-Z]([a-zA-Z0-9\-]{0,%d}[a-zA-Z0-9])?" % ( maximum_center_character_pattern_repetitions, ) def do_draw(self, data): # 1 - Select a valid top-level domain (TLD) name # 2 - Check that the number of characters in our selected TLD won't # prevent us from generating at least a 1 character subdomain. # 3 - Randomize the TLD between upper and lower case characters. domain = data.draw( st.sampled_from(TOP_LEVEL_DOMAINS) .filter(lambda tld: len(tld) + 2 <= self.max_length) .flatmap( lambda tld: st.tuples( *[st.sampled_from([c.lower(), c.upper()]) for c in tld] ).map(u"".join) ) ) # The maximum possible number of subdomains is 126, # 1 character subdomain + 1 '.' character, * 126 = 252, # with a max of 255, that leaves 3 characters for a TLD. # Allowing any more subdomains would not leave enough # characters for even the shortest possible TLDs. elements = cu.many(data, min_size=1, average_size=1, max_size=126) while elements.more(): # Generate a new valid subdomain using the regex strategy. 
sub_domain = data.draw(st.from_regex(self.label_regex, fullmatch=True)) if len(domain) + len(sub_domain) >= self.max_length: data.stop_example(discard=True) break domain = sub_domain + "." + domain return domain @st.defines_strategy_with_reusable_values def domains( max_length=255, # type: int max_element_length=63, # type: int ): # type: (...) -> SearchStrategy[Text] """Generate :rfc:`1035` compliant fully qualified domain names.""" return DomainNameStrategy( max_length=max_length, max_element_length=max_element_length ) @st.defines_strategy_with_reusable_values def urls(): # type: () -> SearchStrategy[Text] """A strategy for :rfc:`3986`, generating http/https URLs.""" def url_encode(s): return "".join(c if c in URL_SAFE_CHARACTERS else "%%%02X" % ord(c) for c in s) schemes = st.sampled_from(["http", "https"]) ports = st.integers(min_value=0, max_value=2 ** 16 - 1).map(":{}".format) paths = st.lists(st.text(string.printable).map(url_encode)).map("/".join) return st.builds( u"{}://{}{}/{}".format, schemes, domains(), st.just(u"") | ports, paths ) @st.defines_strategy_with_reusable_values def ip4_addr_strings(): # type: () -> SearchStrategy[Text] """A strategy for IPv4 address strings. This consists of four strings representing integers [0..255], without zero-padding, joined by dots. """ return st.builds(u"{}.{}.{}.{}".format, *(4 * [st.integers(0, 255)])) @st.defines_strategy_with_reusable_values def ip6_addr_strings(): # type: () -> SearchStrategy[Text] """A strategy for IPv6 address strings. This consists of sixteen quads of hex digits (0000 .. FFFF), joined by colons. Values do not currently have zero-segments collapsed. 
""" part = st.integers(0, 2 ** 16 - 1).map(u"{:04x}".format) return st.tuples(*[part] * 8).map(lambda a: u":".join(a).upper()) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/py.typed000066400000000000000000000000001354103617500277170ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/reporting.py000066400000000000000000000035601354103617500306210ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import inspect from hypothesis._settings import Verbosity, settings from hypothesis.internal.compat import ( binary_type, escape_unicode_characters, print_unicode, ) from hypothesis.utils.dynamicvariables import DynamicVariable def silent(value): pass def default(value): try: print_unicode(value) except UnicodeEncodeError: print_unicode(escape_unicode_characters(value)) reporter = DynamicVariable(default) def current_reporter(): return reporter.value def with_reporter(new_reporter): return reporter.with_value(new_reporter) def current_verbosity(): return settings.default.verbosity def to_text(textish): if inspect.isfunction(textish): textish = textish() if isinstance(textish, binary_type): textish = textish.decode("utf-8") return textish def verbose_report(text): if current_verbosity() >= Verbosity.verbose: base_report(text) def debug_report(text): if current_verbosity() >= Verbosity.debug: base_report(text) def report(text): if current_verbosity() >= Verbosity.normal: base_report(text) def base_report(text): current_reporter()(to_text(text)) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/000077500000000000000000000000001354103617500312625ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/__init__.py000066400000000000000000000016121354103617500333730ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """Package defining SearchStrategy, which is the core type that Hypothesis uses to explore data.""" from __future__ import absolute_import, division, print_function from .strategies import SearchStrategy, check_strategy __all__ = ["SearchStrategy", "check_strategy"] hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/attrs.py000066400000000000000000000153641354103617500330020ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from functools import reduce from itertools import chain import attr import hypothesis.strategies as st from hypothesis.errors import ResolutionFailed from hypothesis.internal.compat import get_type_hints, string_types from hypothesis.searchstrategy.types import is_a_type, type_sorting_key from hypothesis.utils.conventions import infer def from_attrs(target, args, kwargs, to_infer): """An internal version of builds(), specialised for Attrs classes.""" fields = attr.fields(target) kwargs = {k: v for k, v in kwargs.items() if v is not infer} for name in to_infer: kwargs[name] = from_attrs_attribute(getattr(fields, name), target) # We might make this strategy more efficient if we added a layer here that # retries drawing if validation fails, for improved composition. # The treatment of timezones in datetimes() provides a precedent. return st.tuples(st.tuples(*args), st.fixed_dictionaries(kwargs)).map( lambda value: target(*value[0], **value[1]) ) def from_attrs_attribute(attrib, target): """Infer a strategy from the metadata on an attr.Attribute object.""" # Try inferring from the default argument. Note that this will only help # the user passed `infer` to builds() for this attribute, but in that case # we use it as the minimal example. default = st.nothing() if isinstance(attrib.default, attr.Factory): if not getattr(attrib.default, "takes_self", False): # new in 17.1 default = st.builds(attrib.default.factory) elif attrib.default is not attr.NOTHING: default = st.just(attrib.default) # Try inferring None, exact values, or type from attrs provided validators. 
null = st.nothing() # updated to none() on seeing an OptionalValidator in_collections = [] # list of in_ validator collections to sample from validator_types = set() # type constraints to pass to types_to_strategy() if attrib.validator is not None: validator = attrib.validator if isinstance(validator, attr.validators._OptionalValidator): null = st.none() validator = validator.validator if isinstance(validator, attr.validators._AndValidator): vs = validator._validators else: vs = [validator] for v in vs: if isinstance(v, attr.validators._InValidator): if isinstance(v.options, string_types): in_collections.append(list(all_substrings(v.options))) else: in_collections.append(v.options) elif isinstance(v, attr.validators._InstanceOfValidator): validator_types.add(v.type) # This is the important line. We compose the final strategy from various # parts. The default value, if any, is the minimal shrink, followed by # None (again, if allowed). We then prefer to sample from values passed # to an in_ validator if available, but infer from a type otherwise. # Pick one because (sampled_from((1, 2)) | from_type(int)) would usually # fail validation by generating e.g. zero! if in_collections: sample = st.sampled_from(list(ordered_intersection(in_collections))) strat = default | null | sample else: strat = default | null | types_to_strategy(attrib, validator_types) # Better to give a meaningful error here than an opaque "could not draw" # when we try to get a value but have lost track of where this was created. if strat.is_empty: raise ResolutionFailed( "Cannot infer a strategy from the default, validator, type, or " "converter for attribute=%r of class=%r" % (attrib, target) ) return strat def types_to_strategy(attrib, types): """Find all the type metadata for this attribute, reconcile it, and infer a strategy from the mess.""" # If we know types from the validator(s), that's sufficient. 
if len(types) == 1: typ, = types if isinstance(typ, tuple): return st.one_of(*map(st.from_type, typ)) return st.from_type(typ) elif types: # We have a list of tuples of types, and want to find a type # (or tuple of types) that is a subclass of all of of them. type_tuples = [k if isinstance(k, tuple) else (k,) for k in types] # Flatten the list, filter types that would fail validation, and # sort so that ordering is stable between runs and shrinks well. allowed = [ t for t in set(sum(type_tuples, ())) if all(issubclass(t, tup) for tup in type_tuples) ] allowed.sort(key=type_sorting_key) return st.one_of([st.from_type(t) for t in allowed]) # Otherwise, try the `type` attribute as a fallback, and finally try # the type hints on a converter (desperate!) before giving up. if is_a_type(getattr(attrib, "type", None)): # The convoluted test is because variable annotations may be stored # in string form; attrs doesn't evaluate them and we don't handle them. # See PEP 526, PEP 563, and Hypothesis issue #1004 for details. return st.from_type(attrib.type) converter = getattr(attrib, "converter", None) if isinstance(converter, type): return st.from_type(converter) elif callable(converter): hints = get_type_hints(converter) if "return" in hints: return st.from_type(hints["return"]) return st.nothing() def ordered_intersection(in_): """Set union of n sequences, ordered for reproducibility across runs.""" intersection = reduce(set.intersection, in_, set(in_[0])) for x in chain.from_iterable(in_): if x in intersection: yield x intersection.remove(x) def all_substrings(s): """Generate all substrings of `s`, in order of length then occurrence. Includes the empty string (first), and any duplicates that are present. 
>>> list(all_substrings('010')) ['', '0', '1', '0', '01', '10', '010'] """ yield s[:0] for n, _ in enumerate(s): for i in range(len(s) - n): yield s[i : i + n + 1] hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/collections.py000066400000000000000000000200651354103617500341550ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.internal.conjecture.utils as cu from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import OrderedDict from hypothesis.internal.conjecture.junkdrawer import LazySequenceCopy from hypothesis.internal.conjecture.utils import combine_labels from hypothesis.searchstrategy.strategies import ( MappedSearchStrategy, SearchStrategy, filter_not_satisfied, ) class TupleStrategy(SearchStrategy): """A strategy responsible for fixed length tuples based on heterogenous strategies for each of their elements.""" def __init__(self, strategies): SearchStrategy.__init__(self) self.element_strategies = tuple(strategies) def do_validate(self): for s in self.element_strategies: s.validate() def calc_label(self): return combine_labels( self.class_label, *[s.label for s in self.element_strategies] ) def __repr__(self): if len(self.element_strategies) == 1: tuple_string = "%s," % (repr(self.element_strategies[0]),) else: 
tuple_string = ", ".join(map(repr, self.element_strategies)) return "TupleStrategy((%s))" % (tuple_string,) def calc_has_reusable_values(self, recur): return all(recur(e) for e in self.element_strategies) def do_draw(self, data): return tuple(data.draw(e) for e in self.element_strategies) def calc_is_empty(self, recur): return any(recur(e) for e in self.element_strategies) class ListStrategy(SearchStrategy): """A strategy for lists which takes a strategy for its elements and the allowed lengths, and generates lists with the correct size and contents.""" def __init__(self, elements, min_size=0, max_size=float("inf")): SearchStrategy.__init__(self) self.min_size = min_size or 0 self.max_size = max_size if max_size is not None else float("inf") assert 0 <= self.min_size <= self.max_size self.average_size = min( max(self.min_size * 2, self.min_size + 5), 0.5 * (self.min_size + self.max_size), ) self.element_strategy = elements def calc_label(self): return combine_labels(self.class_label, self.element_strategy.label) def do_validate(self): self.element_strategy.validate() if self.is_empty: raise InvalidArgument( ( "Cannot create non-empty lists with elements drawn from " "strategy %r because it has no values." 
) % (self.element_strategy,) ) if self.element_strategy.is_empty and 0 < self.max_size < float("inf"): raise InvalidArgument( "Cannot create a collection of max_size=%r, because no " "elements can be drawn from the element strategy %r" % (self.max_size, self.element_strategy) ) def calc_is_empty(self, recur): if self.min_size == 0: return False else: return recur(self.element_strategy) def do_draw(self, data): if self.element_strategy.is_empty: assert self.min_size == 0 return [] elements = cu.many( data, min_size=self.min_size, max_size=self.max_size, average_size=self.average_size, ) result = [] while elements.more(): result.append(data.draw(self.element_strategy)) return result def __repr__(self): return "%s(%r, min_size=%r, max_size=%r)" % ( self.__class__.__name__, self.element_strategy, self.min_size, self.max_size, ) class UniqueListStrategy(ListStrategy): def __init__(self, elements, min_size, max_size, keys): super(UniqueListStrategy, self).__init__(elements, min_size, max_size) self.keys = keys def do_draw(self, data): if self.element_strategy.is_empty: assert self.min_size == 0 return [] elements = cu.many( data, min_size=self.min_size, max_size=self.max_size, average_size=self.average_size, ) seen_sets = tuple(set() for _ in self.keys) result = [] # We construct a filtered strategy here rather than using a check-and-reject # approach because some strategies have special logic for generation under a # filter, and FilteredStrategy can consolidate multiple filters. 
filtered = self.element_strategy.filter( lambda val: all( key(val) not in seen for (key, seen) in zip(self.keys, seen_sets) ) ) while elements.more(): value = filtered.filtered_strategy.do_filtered_draw( data=data, filter_strategy=filtered ) if value is filter_not_satisfied: elements.reject() else: for key, seen in zip(self.keys, seen_sets): seen.add(key(value)) result.append(value) assert self.max_size >= len(result) >= self.min_size return result class UniqueSampledListStrategy(ListStrategy): def __init__(self, elements, min_size, max_size, keys): super(UniqueSampledListStrategy, self).__init__(elements, min_size, max_size) self.keys = keys def do_draw(self, data): should_draw = cu.many( data, min_size=self.min_size, max_size=self.max_size, average_size=self.average_size, ) seen_sets = tuple(set() for _ in self.keys) result = [] remaining = LazySequenceCopy(self.element_strategy.elements) while should_draw.more(): i = len(remaining) - 1 j = cu.integer_range(data, 0, i) if j != i: remaining[i], remaining[j] = remaining[j], remaining[i] value = remaining.pop() if all(key(value) not in seen for (key, seen) in zip(self.keys, seen_sets)): for key, seen in zip(self.keys, seen_sets): seen.add(key(value)) result.append(value) else: should_draw.reject() assert self.max_size >= len(result) >= self.min_size return result class FixedKeysDictStrategy(MappedSearchStrategy): """A strategy which produces dicts with a fixed set of keys, given a strategy for each of their equivalent values. e.g. {'foo' : some_int_strategy} would generate dicts with the single key 'foo' mapping to some integer. 
""" def __init__(self, strategy_dict): self.dict_type = type(strategy_dict) if isinstance(strategy_dict, OrderedDict): self.keys = tuple(strategy_dict.keys()) else: try: self.keys = tuple(sorted(strategy_dict.keys())) except TypeError: self.keys = tuple(sorted(strategy_dict.keys(), key=repr)) super(FixedKeysDictStrategy, self).__init__( strategy=TupleStrategy(strategy_dict[k] for k in self.keys) ) def calc_is_empty(self, recur): return recur(self.mapped_strategy) def __repr__(self): return "FixedKeysDictStrategy(%r, %r)" % (self.keys, self.mapped_strategy) def pack(self, value): return self.dict_type(zip(self.keys, value)) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/datetime.py000066400000000000000000000102441354103617500334310ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import datetime as dt from calendar import monthrange from hypothesis.internal.conjecture import utils from hypothesis.searchstrategy.strategies import SearchStrategy __all__ = ["DateStrategy", "DatetimeStrategy", "TimedeltaStrategy"] def is_pytz_timezone(tz): if not isinstance(tz, dt.tzinfo): return False module = type(tz).__module__ return module == "pytz" or module.startswith("pytz.") class DatetimeStrategy(SearchStrategy): def __init__(self, min_value, max_value, timezones_strat): assert isinstance(min_value, dt.datetime) assert isinstance(max_value, dt.datetime) assert min_value.tzinfo is None assert max_value.tzinfo is None assert min_value <= max_value assert isinstance(timezones_strat, SearchStrategy) self.min_dt = min_value self.max_dt = max_value self.tz_strat = timezones_strat def do_draw(self, data): result = dict() cap_low, cap_high = True, True for name in ("year", "month", "day", "hour", "minute", "second", "microsecond"): low = getattr(self.min_dt if cap_low else dt.datetime.min, name) high = getattr(self.max_dt if cap_high else dt.datetime.max, name) if name == "day" and not cap_high: _, high = monthrange(**result) if name == "year": val = utils.integer_range(data, low, high, 2000) else: val = utils.integer_range(data, low, high) result[name] = val cap_low = cap_low and val == low cap_high = cap_high and val == high result = dt.datetime(**result) tz = data.draw(self.tz_strat) try: if is_pytz_timezone(tz): # Can't just construct; see http://pytz.sourceforge.net return tz.normalize(tz.localize(result)) return result.replace(tzinfo=tz) except (ValueError, OverflowError): msg = "Failed to draw a datetime between %r and %r with timezone from %r." 
data.note_event(msg % (self.min_dt, self.max_dt, self.tz_strat)) data.mark_invalid() class DateStrategy(SearchStrategy): def __init__(self, min_value, max_value): assert isinstance(min_value, dt.date) assert isinstance(max_value, dt.date) assert min_value < max_value self.min_value = min_value self.days_apart = (max_value - min_value).days self.center = (dt.date(2000, 1, 1) - min_value).days def do_draw(self, data): days = utils.integer_range(data, 0, self.days_apart, center=self.center) return self.min_value + dt.timedelta(days=days) class TimedeltaStrategy(SearchStrategy): def __init__(self, min_value, max_value): assert isinstance(min_value, dt.timedelta) assert isinstance(max_value, dt.timedelta) assert min_value < max_value self.min_value = min_value self.max_value = max_value def do_draw(self, data): result = dict() low_bound = True high_bound = True for name in ("days", "seconds", "microseconds"): low = getattr(self.min_value if low_bound else dt.timedelta.min, name) high = getattr(self.max_value if high_bound else dt.timedelta.max, name) val = utils.integer_range(data, low, high, 0) result[name] = val low_bound = low_bound and val == low high_bound = high_bound and val == high return dt.timedelta(**result) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/deferred.py000066400000000000000000000072271354103617500334240ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import inspect from hypothesis.errors import InvalidArgument from hypothesis.internal.reflection import get_pretty_function_description from hypothesis.searchstrategy.strategies import SearchStrategy class DeferredStrategy(SearchStrategy): """A strategy which may be used before it is fully defined.""" def __init__(self, definition): SearchStrategy.__init__(self) self.__wrapped_strategy = None self.__in_repr = False self.__is_empty = None self.__definition = definition @property def wrapped_strategy(self): if self.__wrapped_strategy is None: if not inspect.isfunction(self.__definition): raise InvalidArgument( ( "Excepted a definition to be a function but got %r of type" " %s instead." ) % (self.__definition, type(self.__definition).__name__) ) result = self.__definition() if result is self: raise InvalidArgument("Cannot define a deferred strategy to be itself") if not isinstance(result, SearchStrategy): raise InvalidArgument( ( "Expected definition to return a SearchStrategy but " "returned %r of type %s" ) % (result, type(result).__name__) ) self.__wrapped_strategy = result del self.__definition return self.__wrapped_strategy @property def branches(self): return self.wrapped_strategy.branches @property def supports_find(self): return self.wrapped_strategy.supports_find def calc_label(self): """Deferred strategies don't have a calculated label, because we would end up having to calculate the fixed point of some hash function in order to calculate it when they recursively refer to themself! The label for the wrapped strategy will still appear because it will be passed to draw. """ # This is actually the same as the parent class implementation, but we # include it explicitly here in order to document that this is a # deliberate decision. 
return self.class_label def calc_is_empty(self, recur): return recur(self.wrapped_strategy) def calc_has_reusable_values(self, recur): return recur(self.wrapped_strategy) def __repr__(self): if self.__wrapped_strategy is not None: if self.__in_repr: return "(deferred@%r)" % (id(self),) try: self.__in_repr = True return repr(self.__wrapped_strategy) finally: self.__in_repr = False else: return "deferred(%s)" % (get_pretty_function_description(self.__definition)) def do_draw(self, data): return data.draw(self.wrapped_strategy) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/flatmapped.py000066400000000000000000000035261354103617500337570ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.internal.reflection import get_pretty_function_description from hypothesis.internal.validation import check_type from hypothesis.searchstrategy.strategies import SearchStrategy class FlatMapStrategy(SearchStrategy): def __init__(self, strategy, expand): super(FlatMapStrategy, self).__init__() self.flatmapped_strategy = strategy self.expand = expand def calc_is_empty(self, recur): return recur(self.flatmapped_strategy) def __repr__(self): if not hasattr(self, u"_cached_repr"): self._cached_repr = u"%r.flatmap(%s)" % ( self.flatmapped_strategy, get_pretty_function_description(self.expand), ) return self._cached_repr def do_draw(self, data): source = data.draw(self.flatmapped_strategy) expanded_source = self.expand(source) check_type(SearchStrategy, expanded_source) return data.draw(expanded_source) @property def branches(self): return [ FlatMapStrategy(strategy=strategy, expand=self.expand) for strategy in self.flatmapped_strategy.branches ] hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/functions.py000066400000000000000000000034711354103617500336510ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.control import note from hypothesis.errors import InvalidState from hypothesis.internal.reflection import arg_string, nicerepr, proxies from hypothesis.searchstrategy.strategies import SearchStrategy class FunctionStrategy(SearchStrategy): supports_find = False def __init__(self, like, returns): super(FunctionStrategy, self).__init__() self.like = like self.returns = returns def calc_is_empty(self, recur): return recur(self.returns) def do_draw(self, data): @proxies(self.like) def inner(*args, **kwargs): if data.frozen: raise InvalidState( "This generated %s function can only be called within the " "scope of the @given that created it." % (nicerepr(self.like),) ) data.can_reproduce_example_from_repr = False val = data.draw(self.returns) note( "Called function: %s(%s) -> %r" % (nicerepr(self.like), arg_string(self.like, args, kwargs), val) ) return val return inner hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/lazy.py000066400000000000000000000123441354103617500326170ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.internal.compat import getfullargspec from hypothesis.internal.reflection import ( arg_string, convert_keyword_arguments, convert_positional_arguments, ) from hypothesis.searchstrategy.strategies import SearchStrategy if False: from typing import Dict # noqa unwrap_cache = {} # type: Dict[SearchStrategy, SearchStrategy] unwrap_depth = 0 def unwrap_strategies(s): global unwrap_depth if not isinstance(s, SearchStrategy): return s try: return unwrap_cache[s] except KeyError: pass unwrap_cache[s] = s try: unwrap_depth += 1 try: result = unwrap_strategies(s.wrapped_strategy) unwrap_cache[s] = result try: assert result.force_has_reusable_values == s.force_has_reusable_values except AttributeError: pass try: result.force_has_reusable_values = s.force_has_reusable_values except AttributeError: pass return result except AttributeError: return s finally: unwrap_depth -= 1 if unwrap_depth <= 0: unwrap_cache.clear() assert unwrap_depth >= 0 class LazyStrategy(SearchStrategy): """A strategy which is defined purely by conversion to and from another strategy. Its parameter and distribution come from that other strategy. 
""" def __init__(self, function, args, kwargs): SearchStrategy.__init__(self) self.__wrapped_strategy = None self.__representation = None self.function = function self.__args = args self.__kwargs = kwargs @property def supports_find(self): return self.wrapped_strategy.supports_find def calc_is_empty(self, recur): return recur(self.wrapped_strategy) def calc_has_reusable_values(self, recur): return recur(self.wrapped_strategy) def calc_is_cacheable(self, recur): for source in (self.__args, self.__kwargs.values()): for v in source: if isinstance(v, SearchStrategy) and not v.is_cacheable: return False return True @property def wrapped_strategy(self): if self.__wrapped_strategy is None: unwrapped_args = tuple(unwrap_strategies(s) for s in self.__args) unwrapped_kwargs = { k: unwrap_strategies(v) for k, v in self.__kwargs.items() } base = self.function(*self.__args, **self.__kwargs) if unwrapped_args == self.__args and unwrapped_kwargs == self.__kwargs: self.__wrapped_strategy = base else: self.__wrapped_strategy = self.function( *unwrapped_args, **unwrapped_kwargs ) return self.__wrapped_strategy def do_validate(self): w = self.wrapped_strategy assert isinstance(w, SearchStrategy), "%r returned non-strategy %r" % (self, w) w.validate() def __repr__(self): if self.__representation is None: _args = self.__args _kwargs = self.__kwargs argspec = getfullargspec(self.function) defaults = dict(argspec.kwonlydefaults or {}) if argspec.defaults is not None: for name, value in zip( reversed(argspec.args), reversed(argspec.defaults) ): defaults[name] = value if len(argspec.args) > 1 or argspec.defaults: _args, _kwargs = convert_positional_arguments( self.function, _args, _kwargs ) else: _args, _kwargs = convert_keyword_arguments( self.function, _args, _kwargs ) kwargs_for_repr = dict(_kwargs) for k, v in defaults.items(): if k in kwargs_for_repr and kwargs_for_repr[k] is defaults[k]: del kwargs_for_repr[k] self.__representation = "%s(%s)" % ( self.function.__name__, 
arg_string(self.function, _args, kwargs_for_repr, reorder=False), ) return self.__representation def do_draw(self, data): return data.draw(self.wrapped_strategy) def do_filtered_draw(self, data, filter_strategy): return self.wrapped_strategy.do_filtered_draw( data=data, filter_strategy=filter_strategy ) @property def label(self): return self.wrapped_strategy.label hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/misc.py000066400000000000000000000126311354103617500325720ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.internal.conjecture.utils as d from hypothesis.internal.compat import bit_length, hrange from hypothesis.searchstrategy.strategies import SearchStrategy, filter_not_satisfied class BoolStrategy(SearchStrategy): """A strategy that produces Booleans with a Bernoulli conditional distribution.""" def __repr__(self): return "BoolStrategy()" def calc_has_reusable_values(self, recur): return True def do_draw(self, data): return d.boolean(data) def is_simple_data(value): try: hash(value) return True except TypeError: return False class JustStrategy(SearchStrategy): """A strategy which always returns a single fixed value.""" def __init__(self, value): SearchStrategy.__init__(self) self.value = value def __repr__(self): return "just(%r)" % (self.value,) def calc_has_reusable_values(self, recur): return True def calc_is_cacheable(self, recur): return is_simple_data(self.value) def do_draw(self, data): return self.value class SampledFromStrategy(SearchStrategy): """A strategy which samples from a set of elements. This is essentially equivalent to using a OneOfStrategy over Just strategies but may be more efficient and convenient. The conditional distribution chooses uniformly at random from some non-empty subset of the elements. """ def __init__(self, elements): SearchStrategy.__init__(self) self.elements = d.check_sample(elements, "sampled_from") assert self.elements def calc_has_reusable_values(self, recur): return True def calc_is_cacheable(self, recur): return is_simple_data(self.elements) def do_draw(self, data): return d.choice(data, self.elements) def do_filtered_draw(self, data, filter_strategy): # Set of indices that have been tried so far, so that we never test # the same element twice during a draw. known_bad_indices = set() def check_index(i): """Return ``True`` if the element at ``i`` satisfies the filter condition. 
""" if i in known_bad_indices: return False ok = filter_strategy.condition(self.elements[i]) if not ok: if not known_bad_indices: filter_strategy.note_retried(data) known_bad_indices.add(i) return ok # Start with ordinary rejection sampling. It's fast if it works, and # if it doesn't work then it was only a small amount of overhead. for _ in hrange(3): i = d.integer_range(data, 0, len(self.elements) - 1) if check_index(i): return self.elements[i] # If we've tried all the possible elements, give up now. max_good_indices = len(self.elements) - len(known_bad_indices) if not max_good_indices: return filter_not_satisfied # Figure out the bit-length of the index that we will write back after # choosing an allowed element. write_length = bit_length(len(self.elements)) # Impose an arbitrary cutoff to prevent us from wasting too much time # on very large element lists. cutoff = 10000 max_good_indices = min(max_good_indices, cutoff) # Before building the list of allowed indices, speculatively choose # one of them. We don't yet know how many allowed indices there will be, # so this choice might be out-of-bounds, but that's OK. speculative_index = d.integer_range(data, 0, max_good_indices - 1) # Calculate the indices of allowed values, so that we can choose one # of them at random. But if we encounter the speculatively-chosen one, # just use that and return immediately. allowed_indices = [] for i in hrange(min(len(self.elements), cutoff)): if check_index(i): allowed_indices.append(i) if len(allowed_indices) > speculative_index: # Early-exit case: We reached the speculative index, so # we just return the corresponding element. data.draw_bits(write_length, forced=i) return self.elements[i] # The speculative index didn't work out, but at this point we've built # the complete list of allowed indices, so we can just choose one of # them. 
if allowed_indices: i = d.choice(data, allowed_indices) data.draw_bits(write_length, forced=i) return self.elements[i] # If there are no allowed indices, the filter couldn't be satisfied. return filter_not_satisfied hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/numbers.py000066400000000000000000000123531354103617500333130ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
class BoundedIntStrategy(SearchStrategy):
    """Generates integers drawn from the inclusive interval
    ``[start, end]``."""

    def __init__(self, start, end):
        super(BoundedIntStrategy, self).__init__()
        self.start = start
        self.end = end

    def __repr__(self):
        return "BoundedIntStrategy(%d, %d)" % (self.start, self.end)

    def do_draw(self, data):
        # Delegate to the shared conjecture utility for bounded draws.
        return d.integer_range(data, self.start, self.end)
class FixedBoundedFloatStrategy(SearchStrategy):
    """A strategy for floats lying between two fixed endpoints.

    The conditional distribution tries to produce values clustered
    closer to one of the two ends of the interval.
    """

    def __init__(self, lower_bound, upper_bound):
        SearchStrategy.__init__(self)
        self.lower_bound = float(lower_bound)
        self.upper_bound = float(upper_bound)
        # A range whose width overflows to infinity cannot be scaled.
        assert not math.isinf(self.upper_bound - self.lower_bound)
        low_key = float_order_key(self.lower_bound)
        high_key = float_order_key(self.upper_bound)
        # Signed zeroes falling inside the range are interesting
        # boundary values, as are the endpoints themselves.
        self.critical = [
            zero
            for zero in (-0.0, 0.0)
            if low_key <= float_order_key(zero) <= high_key
        ]
        self.critical.extend((self.lower_bound, self.upper_bound))

    def __repr__(self):
        return "FixedBoundedFloatStrategy(%s, %s)" % (
            self.lower_bound,
            self.upper_bound,
        )

    def do_draw(self, data):
        width = self.upper_bound - self.lower_bound
        result = self.lower_bound + width * d.fractional_float(data)
        # Rounding may push the scaled value just outside the interval,
        # or give it a sign inconsistent with the endpoints; reject such
        # draws rather than clamping them.
        assume(self.lower_bound <= result <= self.upper_bound)
        assume(sign(self.lower_bound) <= sign(result) <= sign(self.upper_bound))
        # Special handling for bounds of -0.0: if we landed exactly on
        # an endpoint, copy that endpoint's zero-sign too.
        for bound in (self.lower_bound, self.upper_bound):
            if result == bound:
                result = math.copysign(result, bound)
        return result
class LimitedStrategy(SearchStrategy):
    """Wraps a strategy so that only a bounded number of draws may be
    made from it while a ``capped`` block is active."""

    def __init__(self, strategy):
        super(LimitedStrategy, self).__init__()
        self.base_strategy = strategy
        # Remaining number of permitted draws under the current cap.
        self.marker = 0
        # True only while inside a ``capped`` context.
        self.currently_capped = False

    def do_validate(self):
        self.base_strategy.validate()

    def do_draw(self, data):
        # Drawing outside a ``capped`` block is a programming error.
        assert self.currently_capped
        if self.marker > 0:
            self.marker -= 1
            return data.draw(self.base_strategy)
        # Budget exhausted: abort this draw attempt entirely.
        raise LimitReached()

    @contextmanager
    def capped(self, max_templates):
        """Allow at most ``max_templates`` draws inside the block."""
        assert not self.currently_capped
        try:
            self.currently_capped = True
            self.marker = max_templates
            yield
        finally:
            self.currently_capped = False
[self.limited_base, self.extend(self.limited_base)] while 2 ** len(strategies) <= max_leaves: strategies.append(extend(OneOfStrategy(tuple(strategies)))) self.strategy = OneOfStrategy(strategies) def __repr__(self): if not hasattr(self, "_cached_repr"): self._cached_repr = "recursive(%r, %s, max_leaves=%d)" % ( self.base, get_pretty_function_description(self.extend), self.max_leaves, ) return self._cached_repr def do_validate(self): if not isinstance(self.base, SearchStrategy): raise InvalidArgument( "Expected base to be SearchStrategy but got %r" % (self.base,) ) extended = self.extend(self.limited_base) if not isinstance(extended, SearchStrategy): raise InvalidArgument( "Expected extend(%r) to be a SearchStrategy but got %r" % (self.limited_base, extended) ) self.limited_base.validate() self.extend(self.limited_base).validate() def do_draw(self, data): count = 0 while True: try: with self.limited_base.capped(self.max_leaves): return data.draw(self.strategy) except LimitReached: # Workaround for possible coverage bug - this branch is definitely # covered but for some reason is showing up as not covered. if count == 0: # pragma: no branch data.note_event( lazyformat( "Draw for %r exceeded max_leaves and had to be retried", self, ) ) count += 1 hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/regex.py000066400000000000000000000424051354103617500327530ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import operator import re import sre_constants as sre import sre_parse import sys import hypothesis.strategies as st from hypothesis import reject from hypothesis.internal.charmap import as_general_categories, categories from hypothesis.internal.compat import PY3, hrange, hunichr, int_to_byte, text_type HAS_SUBPATTERN_FLAGS = sys.version_info[:2] >= (3, 6) UNICODE_CATEGORIES = set(categories()) SPACE_CHARS = set(u" \t\n\r\f\v") UNICODE_SPACE_CHARS = SPACE_CHARS | set(u"\x1c\x1d\x1e\x1f\x85") UNICODE_DIGIT_CATEGORIES = {"Nd"} UNICODE_SPACE_CATEGORIES = set(as_general_categories("Z")) UNICODE_LETTER_CATEGORIES = set(as_general_categories("L")) UNICODE_WORD_CATEGORIES = set(as_general_categories(["L", "N"])) # This is verbose, but correct on all versions of Python BYTES_ALL = {int_to_byte(i) for i in range(256)} BYTES_DIGIT = {b for b in BYTES_ALL if re.match(b"\\d", b)} BYTES_SPACE = {b for b in BYTES_ALL if re.match(b"\\s", b)} BYTES_WORD = {b for b in BYTES_ALL if re.match(b"\\w", b)} BYTES_LOOKUP = { sre.CATEGORY_DIGIT: BYTES_DIGIT, sre.CATEGORY_SPACE: BYTES_SPACE, sre.CATEGORY_WORD: BYTES_WORD, sre.CATEGORY_NOT_DIGIT: BYTES_ALL - BYTES_DIGIT, sre.CATEGORY_NOT_SPACE: BYTES_ALL - BYTES_SPACE, sre.CATEGORY_NOT_WORD: BYTES_ALL - BYTES_WORD, } # On Python 2, these unicode chars are matched by \W, meaning 'not word', # but unicodedata.category(c) returns one of the word categories above. 
class Context(object):
    """Mutable regex-translation state: just the active flags.

    ``groups`` is accepted for call compatibility but is not stored --
    group state lives in GROUP_CACHE_STRATEGY instead (presumably; see
    update_group/reuse_group).
    """

    __slots__ = ["flags"]

    def __init__(self, groups=None, flags=0):
        self.flags = flags
They affect how and which characters are matched """ def __init__(self, negate=False, flags=0): self._categories = set() self._whitelist_chars = set() self._blacklist_chars = set() self._negate = negate self._ignorecase = flags & re.IGNORECASE self._unicode = not bool(flags & re.ASCII) if PY3 else bool(flags & re.UNICODE) self.code_to_char = hunichr @property def strategy(self): """Returns resulting strategy that generates configured char set.""" max_codepoint = None if self._unicode else 127 if self._negate: black_chars = self._blacklist_chars - self._whitelist_chars return st.characters( blacklist_categories=self._categories | {"Cc", "Cs"}, blacklist_characters=self._whitelist_chars, whitelist_characters=black_chars, max_codepoint=max_codepoint, ) white_chars = self._whitelist_chars - self._blacklist_chars return st.characters( whitelist_categories=self._categories, blacklist_characters=self._blacklist_chars, whitelist_characters=white_chars, max_codepoint=max_codepoint, ) def add_category(self, category): """Update unicode state to match sre_parse object ``category``.""" if category == sre.CATEGORY_DIGIT: self._categories |= UNICODE_DIGIT_CATEGORIES elif category == sre.CATEGORY_NOT_DIGIT: self._categories |= UNICODE_CATEGORIES - UNICODE_DIGIT_CATEGORIES elif category == sre.CATEGORY_SPACE: self._categories |= UNICODE_SPACE_CATEGORIES self._whitelist_chars |= ( UNICODE_SPACE_CHARS if self._unicode else SPACE_CHARS ) elif category == sre.CATEGORY_NOT_SPACE: self._categories |= UNICODE_CATEGORIES - UNICODE_SPACE_CATEGORIES self._blacklist_chars |= ( UNICODE_SPACE_CHARS if self._unicode else SPACE_CHARS ) elif category == sre.CATEGORY_WORD: self._categories |= UNICODE_WORD_CATEGORIES self._whitelist_chars.add(u"_") if self._unicode and not PY3: # pragma: no cover self._blacklist_chars |= UNICODE_WEIRD_NONWORD_CHARS elif category == sre.CATEGORY_NOT_WORD: self._categories |= UNICODE_CATEGORIES - UNICODE_WORD_CATEGORIES self._blacklist_chars.add(u"_") if 
class BytesBuilder(CharactersBuilder):
    """Variant of CharactersBuilder for binary (bytes) patterns."""

    def __init__(self, negate=False, flags=0):
        # NOTE: deliberately does not call the superclass constructor;
        # byte patterns carry no unicode-category state.
        self._whitelist_chars = set()
        self._blacklist_chars = set()
        self._negate = negate
        self._ignorecase = flags & re.IGNORECASE
        self.code_to_char = int_to_byte

    @property
    def strategy(self):
        """Strategy sampling uniformly from the configured byte set."""
        if self._negate:
            permitted = BYTES_ALL - self._whitelist_chars
        else:
            permitted = self._whitelist_chars
        return st.sampled_from(sorted(permitted))

    def add_category(self, category):
        """Whitelist every byte matched by the sre ``category``."""
        self._whitelist_chars |= BYTES_LOOKUP[category]
else: return st.binary() if is_unicode: base_padding_strategy = st.text() empty = st.just(u"") newline = st.just(u"\n") else: base_padding_strategy = st.binary() empty = st.just(b"") newline = st.just(b"\n") right_pad = base_padding_strategy left_pad = base_padding_strategy if fullmatch: right_pad = empty elif parsed[-1][0] == sre.AT: if parsed[-1][1] == sre.AT_END_STRING: right_pad = empty elif parsed[-1][1] == sre.AT_END: if regex.flags & re.MULTILINE: right_pad = st.one_of( empty, st.builds(operator.add, newline, right_pad) ) else: right_pad = st.one_of(empty, newline) if fullmatch: left_pad = empty elif parsed[0][0] == sre.AT: if parsed[0][1] == sre.AT_BEGINNING_STRING: left_pad = empty elif parsed[0][1] == sre.AT_BEGINNING: if regex.flags & re.MULTILINE: left_pad = st.one_of(empty, st.builds(operator.add, left_pad, newline)) else: left_pad = empty base = base_regex_strategy(regex, parsed).filter(regex.search) return maybe_pad(regex, base, left_pad, right_pad) def _strategy(codes, context, is_unicode): """Convert SRE regex parse tree to strategy that generates strings matching that regex represented by that parse tree. `codes` is either a list of SRE regex elements representations or a particular element representation. Each element is a tuple of element code (as string) and parameters. E.g. regex 'ab[0-9]+' compiles to following elements: [ (LITERAL, 97), (LITERAL, 98), (MAX_REPEAT, (1, 4294967295, [ (IN, [ (RANGE, (48, 57)) ]) ])) ] The function recursively traverses regex element tree and converts each element to strategy that generates strings that match that element. Context stores 1. List of groups (for backreferences) 2. Active regex flags (e.g. 
IGNORECASE, DOTALL, UNICODE, they affect behavior of various inner strategies) """ def recurse(codes): return _strategy(codes, context, is_unicode) if is_unicode: empty = u"" to_char = hunichr else: empty = b"" to_char = int_to_byte binary_char = st.binary(min_size=1, max_size=1) if not isinstance(codes, tuple): # List of codes strategies = [] i = 0 while i < len(codes): if codes[i][0] == sre.LITERAL and not context.flags & re.IGNORECASE: # Merge subsequent "literals" into one `just()` strategy # that generates corresponding text if no IGNORECASE j = i + 1 while j < len(codes) and codes[j][0] == sre.LITERAL: j += 1 if i + 1 < j: strategies.append( st.just( empty.join( [to_char(charcode) for (_, charcode) in codes[i:j]] ) ) ) i = j continue strategies.append(recurse(codes[i])) i += 1 # We handle this separately at the top level, but some regex can # contain empty lists internally, so we need to handle this here too. if not strategies: return st.just(empty) if len(strategies) == 1: return strategies[0] return st.tuples(*strategies).map(empty.join) else: # Single code code, value = codes if code == sre.LITERAL: # Regex 'a' (single char) c = to_char(value) if ( context.flags & re.IGNORECASE and c != c.swapcase() and re.match(re.escape(c), c.swapcase(), re.IGNORECASE) is not None ): # We do the explicit check for swapped-case matching because # eg 'ß'.upper() == 'SS' and ignorecase doesn't match it. 
return st.sampled_from([c, c.swapcase()]) return st.just(c) elif code == sre.NOT_LITERAL: # Regex '[^a]' (negation of a single char) c = to_char(value) blacklist = set(c) if ( context.flags & re.IGNORECASE and re.match(re.escape(c), c.swapcase(), re.IGNORECASE) is not None ): blacklist |= set(c.swapcase()) if is_unicode: return st.characters(blacklist_characters=blacklist) else: return binary_char.filter(lambda c: c not in blacklist) elif code == sre.IN: # Regex '[abc0-9]' (set of characters) negate = value[0][0] == sre.NEGATE if is_unicode: builder = CharactersBuilder(negate, context.flags) else: builder = BytesBuilder(negate, context.flags) for charset_code, charset_value in value: if charset_code == sre.NEGATE: # Regex '[^...]' (negation) # handled by builder = CharactersBuilder(...) above pass elif charset_code == sre.LITERAL: # Regex '[a]' (single char) builder.add_char(charset_value) elif charset_code == sre.RANGE: # Regex '[a-z]' (char range) low, high = charset_value for char_code in hrange(low, high + 1): builder.add_char(char_code) elif charset_code == sre.CATEGORY: # Regex '[\w]' (char category) builder.add_category(charset_value) else: # pragma: no cover # Currently there are no known code points other than # handled here. This code is just future proofing raise AssertionError("Unknown charset code: %s" % charset_code) return builder.strategy elif code == sre.ANY: # Regex '.' 
(any char) if is_unicode: if context.flags & re.DOTALL: return st.characters() return st.characters(blacklist_characters=u"\n") else: if context.flags & re.DOTALL: return binary_char return binary_char.filter(lambda c: c != b"\n") elif code == sre.AT: # Regexes like '^...', '...$', '\bfoo', '\Bfoo' # An empty string (or newline) will match the token itself, but # we don't and can't check the position (eg '%' at the end) return st.just(empty) elif code == sre.SUBPATTERN: # Various groups: '(...)', '(:...)' or '(?P...)' old_flags = context.flags if HAS_SUBPATTERN_FLAGS: # pragma: no cover # This feature is available only in specific Python versions context.flags = (context.flags | value[1]) & ~value[2] strat = _strategy(value[-1], context, is_unicode) context.flags = old_flags if value[0]: strat = update_group(value[0], strat) return strat elif code == sre.GROUPREF: # Regex '\\1' or '(?P=name)' (group reference) return reuse_group(value) elif code == sre.ASSERT: # Regex '(?=...)' or '(?<=...)' (positive lookahead/lookbehind) return recurse(value[1]) elif code == sre.ASSERT_NOT: # Regex '(?!...)' or '(? 50: # pragma: no cover key = frozenset(mapping.items()) assert key not in seen, (key, name) seen.add(key) to_update = needs_update needs_update = set() for strat in to_update: new_value = getattr(strat, calculation)(recur2(strat)) if new_value != mapping[strat]: needs_update.update(listeners[strat]) mapping[strat] = new_value # We now have a complete and accurate calculation of the # property values for everything we have seen in the course of # running this calculation. We simultaneously update all of # them (not just the strategy we started out with). for k, v in mapping.items(): setattr(k, cache_key, v) return getattr(self, cache_key) accept.__name__ = name return property(accept) # Returns True if this strategy can never draw a value and will always # result in the data being marked invalid. 
# The fact that this returns False does not guarantee that a valid value # can be drawn - this is not intended to be perfect, and is primarily # intended to be an optimisation for some cases. is_empty = recursive_property("is_empty", True) # Returns True if values from this strategy can safely be reused without # this causing unexpected behaviour. has_reusable_values = recursive_property("has_reusable_values", True) # Whether this strategy is suitable for holding onto in a cache. is_cacheable = recursive_property("is_cacheable", True) def calc_is_cacheable(self, recur): return True def calc_is_empty(self, recur): # Note: It is correct and significant that the default return value # from calc_is_empty is False despite the default value for is_empty # being true. The reason for this is that strategies should be treated # as empty absent evidence to the contrary, but most basic strategies # are trivially non-empty and it would be annoying to have to override # this method to show that. return False def calc_has_reusable_values(self, recur): return False def example(self, random=not_set): # type: (UniqueIdentifier) -> Ex """Provide an example of the sort of value that this strategy generates. This is biased to be slightly simpler than is typical for values from this strategy, for clarity purposes. This method shouldn't be taken too seriously. It's here for interactive exploration of the API, not for any sort of real testing. This method is part of the public API. """ if random is not not_set: note_deprecation("The random argument does nothing", since="2019-07-08") context = _current_build_context.value if context is not None: if context.data is not None and context.data.depth > 0: raise HypothesisException( "Using example() inside a strategy definition is a bad " "idea. Instead consider using hypothesis.strategies.builds() " "or @hypothesis.strategies.composite to define your strategy." 
" See https://hypothesis.readthedocs.io/en/latest/data.html" "#hypothesis.strategies.builds or " "https://hypothesis.readthedocs.io/en/latest/data.html" "#composite-strategies for more details." ) else: raise HypothesisException( "Using example() inside a test function is a bad " "idea. Instead consider using hypothesis.strategies.data() " "to draw more examples during testing. See " "https://hypothesis.readthedocs.io/en/latest/data.html" "#drawing-interactively-in-tests for more details." ) from hypothesis.core import given # Note: this function has a weird name because it might appear in # tracebacks, and we want users to know that they can ignore it. @given(self) @settings( database=None, max_examples=10, deadline=None, verbosity=Verbosity.quiet, phases=(Phase.generate,), suppress_health_check=HealthCheck.all(), ) def example_generating_inner_function(ex): examples.append(ex) examples = [] # type: List[Ex] example_generating_inner_function() return random_choice(examples) def map(self, pack): # type: (Callable[[Ex], T]) -> SearchStrategy[T] """Returns a new strategy that generates values by generating a value from this strategy and then calling pack() on the result, giving that. This method is part of the public API. """ return MappedSearchStrategy(pack=pack, strategy=self) def flatmap(self, expand): # type: (Callable[[Ex], SearchStrategy[T]]) -> SearchStrategy[T] """Returns a new strategy that generates values by generating a value from this strategy, say x, then generating a value from strategy(expand(x)) This method is part of the public API. """ from hypothesis.searchstrategy.flatmapped import FlatMapStrategy return FlatMapStrategy(expand=expand, strategy=self) def filter(self, condition): # type: (Callable[[Ex], bool]) -> SearchStrategy[Ex] """Returns a new strategy that generates values from this strategy which satisfy the provided condition. Note that if the condition is too hard to satisfy this might result in your tests failing with Unsatisfiable. 
This method is part of the public API. """ return FilteredStrategy(conditions=(condition,), strategy=self) def do_filtered_draw(self, data, filter_strategy): # Hook for strategies that want to override the behaviour of # FilteredStrategy. Most strategies don't, so by default we delegate # straight back to the default filtered-draw implementation. return filter_strategy.default_do_filtered_draw(data) @property def branches(self): # type: () -> List[SearchStrategy[Ex]] return [self] def __or__(self, other): """Return a strategy which produces values by randomly drawing from one of this strategy or the other strategy. This method is part of the public API. """ if not isinstance(other, SearchStrategy): raise ValueError("Cannot | a SearchStrategy with %r" % (other,)) return one_of_strategies((self, other)) def validate(self): # type: () -> None """Throw an exception if the strategy is not valid. This can happen due to lazy construction """ if self.validate_called: return try: self.validate_called = True self.do_validate() self.is_empty self.has_reusable_values except Exception: self.validate_called = False raise LABELS = {} # type: dict @property def class_label(self): cls = self.__class__ try: return cls.LABELS[cls] except KeyError: pass result = calc_label_from_cls(cls) cls.LABELS[cls] = result return result @property def label(self): if self.__label is calculating: return 0 if self.__label is None: self.__label = calculating self.__label = self.calc_label() return self.__label def calc_label(self): return self.class_label def do_validate(self): pass def do_draw(self, data): # type: (ConjectureData) -> Ex raise NotImplementedError("%s.do_draw" % (type(self).__name__,)) def __init__(self): pass class OneOfStrategy(SearchStrategy): """Implements a union of strategies. Given a number of strategies this generates values which could have come from any of them. 
The conditional distribution draws uniformly at random from some non-empty subset of these strategies and then draws from the conditional distribution of that strategy. """ def __init__(self, strategies): SearchStrategy.__init__(self) strategies = tuple(strategies) self.original_strategies = list(strategies) self.__element_strategies = None self.__in_branches = False def calc_is_empty(self, recur): return all(recur(e) for e in self.original_strategies) def calc_has_reusable_values(self, recur): return all(recur(e) for e in self.original_strategies) def calc_is_cacheable(self, recur): return all(recur(e) for e in self.original_strategies) @property def element_strategies(self): if self.__element_strategies is None: strategies = [] for arg in self.original_strategies: check_strategy(arg) if not arg.is_empty: strategies.extend([s for s in arg.branches if not s.is_empty]) pruned = [] seen = set() for s in strategies: if s is self: continue if s in seen: continue seen.add(s) pruned.append(s) branch_labels = [] shift = bit_length(len(pruned)) for i, p in enumerate(pruned): branch_labels.append( (((self.label ^ p.label) << shift) + i) & LABEL_MASK ) self.__element_strategies = pruned self.__branch_labels = tuple(branch_labels) return self.__element_strategies @property def branch_labels(self): self.element_strategies assert len(self.__branch_labels) == len(self.element_strategies) return self.__branch_labels def calc_label(self): return combine_labels( self.class_label, *[p.label for p in self.original_strategies] ) def do_draw(self, data): # type: (ConjectureData) -> Ex n = len(self.element_strategies) assert n > 0 if n == 1: return data.draw(self.element_strategies[0]) i = cu.integer_range(data, 0, n - 1) return data.draw(self.element_strategies[i], label=self.branch_labels[i]) def __repr__(self): return "one_of(%s)" % ", ".join(map(repr, self.original_strategies)) def do_validate(self): for e in self.element_strategies: e.validate() @property def branches(self): if not 
class MappedSearchStrategy(SearchStrategy):
    """A strategy defined purely by conversion to and from another
    strategy.

    Its parameter and distribution come from that other strategy.
    """

    def __init__(self, strategy, pack=None):
        SearchStrategy.__init__(self)
        self.mapped_strategy = strategy
        if pack is not None:
            self.pack = pack

    def calc_is_empty(self, recur):
        # Empty exactly when the underlying strategy is.
        return recur(self.mapped_strategy)

    def calc_is_cacheable(self, recur):
        return recur(self.mapped_strategy)

    def __repr__(self):
        if not hasattr(self, "_cached_repr"):
            self._cached_repr = "%r.map(%s)" % (
                self.mapped_strategy,
                get_pretty_function_description(self.pack),
            )
        return self._cached_repr

    def do_validate(self):
        self.mapped_strategy.validate()

    def pack(self, x):
        """Take a value produced by the underlying mapped_strategy and
        turn it into a value suitable for outputting from this
        strategy."""
        raise NotImplementedError("%s.pack()" % (self.__class__.__name__))

    def do_draw(self, data):
        # type: (ConjectureData) -> Ex
        # Retry a few times if ``pack`` hits an unsatisfied assumption.
        for _ in range(3):
            index_before = data.index
            try:
                data.start_example(MAPPED_SEARCH_STRATEGY_DO_DRAW_LABEL)
                mapped = self.pack(data.draw(self.mapped_strategy))
                data.stop_example()
                return mapped
            except UnsatisfiedAssumption:
                data.stop_example(discard=True)
                if data.index == index_before:
                    # No data was consumed, so retrying cannot help.
                    raise
        raise UnsatisfiedAssumption()

    @property
    def branches(self):
        # type: () -> List[SearchStrategy[Ex]]
        return [
            MappedSearchStrategy(pack=self.pack, strategy=branch)
            for branch in self.mapped_strategy.branches
        ]
self.flat_conditions = strategy.flat_conditions + conditions self.filtered_strategy = strategy.filtered_strategy else: self.flat_conditions = conditions self.filtered_strategy = strategy assert self.flat_conditions assert isinstance(self.flat_conditions, tuple) assert not isinstance(self.filtered_strategy, FilteredStrategy) self.__condition = None def calc_is_empty(self, recur): return recur(self.filtered_strategy) def calc_is_cacheable(self, recur): return recur(self.filtered_strategy) def __repr__(self): if not hasattr(self, "_cached_repr"): self._cached_repr = "%r%s" % ( self.filtered_strategy, "".join( ".filter(%s)" % get_pretty_function_description(cond) for cond in self.flat_conditions ), ) return self._cached_repr def do_validate(self): self.filtered_strategy.validate() @property def condition(self): if self.__condition is None: assert self.flat_conditions if len(self.flat_conditions) == 1: # Avoid an extra indirection in the common case of only one # condition. self.__condition = self.flat_conditions[0] else: self.__condition = lambda x: all( cond(x) for cond in self.flat_conditions ) return self.__condition def do_draw(self, data): # type: (ConjectureData) -> Ex result = self.filtered_strategy.do_filtered_draw( data=data, filter_strategy=self ) if result is not filter_not_satisfied: return result data.note_event("Aborted test because unable to satisfy %r" % (self,)) data.mark_invalid() raise AssertionError("Unreachable, for Mypy") # pragma: no cover def note_retried(self, data): data.note_event(lazyformat("Retried draw from %r to satisfy filter", self)) def default_do_filtered_draw(self, data): for i in hrange(3): start_index = data.index value = data.draw(self.filtered_strategy) if self.condition(value): return value else: if i == 0: self.note_retried(data) # This is to guard against the case where we consume no data. # As long as we consume data, we'll eventually pass or raise. # But if we don't this could be an infinite loop. 
assume(data.index > start_index) return filter_not_satisfied @property def branches(self): # type: () -> List[SearchStrategy[Ex]] return [ FilteredStrategy(strategy=strategy, conditions=self.flat_conditions) for strategy in self.filtered_strategy.branches ] @check_function def check_strategy(arg, name=""): check_type(SearchStrategy, arg, name) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/strings.py000066400000000000000000000074731354103617500333400ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.errors import InvalidArgument from hypothesis.internal import charmap from hypothesis.internal.compat import binary_type, hunichr from hypothesis.internal.conjecture.utils import integer_range from hypothesis.internal.intervalsets import IntervalSet from hypothesis.searchstrategy.strategies import MappedSearchStrategy, SearchStrategy class OneCharStringStrategy(SearchStrategy): """A strategy which generates single character strings of text type.""" def __init__( self, whitelist_categories=None, blacklist_categories=None, blacklist_characters=None, min_codepoint=None, max_codepoint=None, whitelist_characters=None, ): assert set(whitelist_categories or ()).issubset(charmap.categories()) assert set(blacklist_categories or ()).issubset(charmap.categories()) intervals = charmap.query( include_categories=whitelist_categories, exclude_categories=blacklist_categories, min_codepoint=min_codepoint, max_codepoint=max_codepoint, include_characters=whitelist_characters, exclude_characters=blacklist_characters, ) if not intervals: arguments = [ ("whitelist_categories", whitelist_categories), ("blacklist_categories", blacklist_categories), ("whitelist_characters", whitelist_characters), ("blacklist_characters", blacklist_characters), ("min_codepoint", min_codepoint), ("max_codepoint", max_codepoint), ] raise InvalidArgument( "No characters are allowed to be generated by this " "combination of arguments: " + ", ".join("%s=%r" % arg for arg in arguments if arg[1] is not None) ) self.intervals = IntervalSet(intervals) self.zero_point = self.intervals.index_above(ord("0")) def do_draw(self, data): i = integer_range(data, 0, len(self.intervals) - 1, center=self.zero_point) return hunichr(self.intervals[i]) class StringStrategy(MappedSearchStrategy): """A strategy for text strings, defined in terms of a strategy for lists of single character text strings.""" def __init__(self, 
list_of_one_char_strings_strategy): super(StringStrategy, self).__init__(strategy=list_of_one_char_strings_strategy) def __repr__(self): return "%r.map(u''.join)" % self.mapped_strategy def pack(self, ls): return u"".join(ls) class BinaryStringStrategy(MappedSearchStrategy): """A strategy for strings of bytes, defined in terms of a strategy for lists of bytes.""" def __repr__(self): return "%r.map(bytearray).map(%s)" % ( self.mapped_strategy, binary_type.__name__, ) def pack(self, x): assert isinstance(x, list), repr(x) ba = bytearray(x) return binary_type(ba) class FixedSizeBytes(SearchStrategy): def __init__(self, size): self.size = size def do_draw(self, data): return binary_type(data.draw_bytes(self.size)) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/searchstrategy/types.py000066400000000000000000000330311354103617500330000ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import collections import datetime import decimal import fractions import functools import inspect import io import numbers import sys import uuid import hypothesis.strategies as st from hypothesis.errors import InvalidArgument, ResolutionFailed from hypothesis.internal.compat import ( PY2, ForwardRef, abc, binary_type, text_type, typing_root_type, ) def type_sorting_key(t): """Minimise to None, then non-container types, then container types.""" if not is_a_type(t): raise InvalidArgument("thing=%s must be a type" % (t,)) if t is None or t is type(None): # noqa: E721 return (-1, repr(t)) if not isinstance(t, type): # pragma: no cover # Some generics in the typing module are not actually types in 3.7 return (2, repr(t)) return (int(issubclass(t, abc.Container)), repr(t)) def try_issubclass(thing, superclass): thing = getattr(thing, "__origin__", None) or thing superclass = getattr(superclass, "__origin__", None) or superclass try: return issubclass(thing, superclass) except (AttributeError, TypeError): # pragma: no cover # Some types can't be the subject or object of an instance or # subclass check under Python 3.5 return False def is_a_new_type(thing): # At runtime, `typing.NewType` returns an identity function rather # than an actual type, but we can check whether that thing matches. return ( hasattr(thing, "__supertype__") and hasattr(typing, "NewType") and inspect.isfunction(thing) and getattr(thing, "__module__", None) == "typing" ) def is_a_type(thing): """Return True if thing is a type or a generic type like thing.""" return ( isinstance(thing, type) or isinstance(thing, typing_root_type) or is_a_new_type(thing) ) def from_typing_type(thing): # We start with special-case support for Union and Tuple - the latter # isn't actually a generic type. Support for Callable may be added to # this section later. 
# We then explicitly error on non-Generic types, which don't carry enough # information to sensibly resolve to strategies at runtime. # Finally, we run a variation of the subclass lookup in st.from_type # among generic types in the lookup. import typing # Under 3.6 Union is handled directly in st.from_type, as the argument is # not an instance of `type`. However, under Python 3.5 Union *is* a type # and we have to handle it here, including failing if it has no parameters. if hasattr(thing, "__union_params__"): # pragma: no cover args = sorted(thing.__union_params__ or (), key=type_sorting_key) if not args: raise ResolutionFailed("Cannot resolve Union of no types.") return st.one_of([st.from_type(t) for t in args]) if getattr(thing, "__origin__", None) == tuple or isinstance( thing, getattr(typing, "TupleMeta", ()) ): elem_types = getattr(thing, "__tuple_params__", None) or () elem_types += getattr(thing, "__args__", None) or () if ( getattr(thing, "__tuple_use_ellipsis__", False) or len(elem_types) == 2 and elem_types[-1] is Ellipsis ): return st.lists(st.from_type(elem_types[0])).map(tuple) elif len(elem_types) == 1 and elem_types[0] == (): return st.tuples() # Empty tuple; see issue #1583 return st.tuples(*map(st.from_type, elem_types)) if isinstance(thing, typing.TypeVar): if getattr(thing, "__bound__", None) is not None: return st.from_type(thing.__bound__) if getattr(thing, "__constraints__", None): return st.shared( st.sampled_from(thing.__constraints__), key="typevar-with-constraint" ).flatmap(st.from_type) # Constraints may be None or () on various Python versions. return st.text() # An arbitrary type for the typevar # Now, confirm that we're dealing with a generic type as we expected if not isinstance(thing, typing_root_type): # pragma: no cover raise ResolutionFailed("Cannot resolve %s to a strategy" % (thing,)) # Parametrised generic types have their __origin__ attribute set to the # un-parametrised version, which we need to use in the subclass checks. 
# e.g.: typing.List[int].__origin__ == typing.List mapping = { k: v for k, v in _global_type_lookup.items() if isinstance(k, typing_root_type) and try_issubclass(k, thing) } if typing.Dict in mapping: # The subtype relationships between generic and concrete View types # are sometimes inconsistent under Python 3.5, so we pop them out to # preserve our invariant that all examples of from_type(T) are # instances of type T - and simplify the strategy for abstract types # such as Container for t in (typing.KeysView, typing.ValuesView, typing.ItemsView): mapping.pop(t, None) strategies = [ v if isinstance(v, st.SearchStrategy) else v(thing) for k, v in mapping.items() if sum(try_issubclass(k, T) for T in mapping) == 1 ] empty = ", ".join(repr(s) for s in strategies if s.is_empty) if empty or not strategies: # pragma: no cover raise ResolutionFailed( "Could not resolve %s to a strategy; consider using " "register_type_strategy" % (empty or thing,) ) return st.one_of(strategies) _global_type_lookup = { # Types with core Hypothesis strategies type(None): st.none(), bool: st.booleans(), int: st.integers(), float: st.floats(), complex: st.complex_numbers(), fractions.Fraction: st.fractions(), decimal.Decimal: st.decimals(), text_type: st.text(), binary_type: st.binary(), datetime.datetime: st.datetimes(), datetime.date: st.dates(), datetime.time: st.times(), datetime.timedelta: st.timedeltas(), uuid.UUID: st.uuids(), tuple: st.builds(tuple), list: st.builds(list), set: st.builds(set), frozenset: st.builds(frozenset), dict: st.builds(dict), type(lambda: None): st.functions(), # Built-in types type(Ellipsis): st.just(Ellipsis), type(NotImplemented): st.just(NotImplemented), bytearray: st.binary().map(bytearray), memoryview: st.binary().map(memoryview), numbers.Real: st.floats(), numbers.Rational: st.fractions(), numbers.Number: st.complex_numbers(), numbers.Integral: st.integers(), numbers.Complex: st.complex_numbers(), slice: st.builds( slice, st.none() | st.integers(), 
st.none() | st.integers(), st.none() | st.integers(), ) # Pull requests with more types welcome! } if PY2: # xrange's |stop - start| must fit in a C long int64_strat = st.integers(-sys.maxint // 2, sys.maxint // 2) # noqa _global_type_lookup.update( { int: st.integers().filter(lambda x: isinstance(x, int)), long: st.integers().map(long), # noqa xrange: st.integers(min_value=0, max_value=sys.maxint).map(xrange) # noqa | st.builds(xrange, int64_strat, int64_strat) # noqa | st.builds( xrange, int64_strat, int64_strat, int64_strat.filter(bool) # noqa ), } ) else: _global_type_lookup.update( { range: st.integers(min_value=0).map(range) | st.builds(range, st.integers(), st.integers()) | st.builds(range, st.integers(), st.integers(), st.integers().filter(bool)) } ) _global_type_lookup[type] = st.sampled_from( [type(None)] + sorted(_global_type_lookup, key=str) ) try: from hypothesis.extra.pytz import timezones _global_type_lookup[datetime.tzinfo] = timezones() except ImportError: # pragma: no cover pass try: # pragma: no cover import numpy as np from hypothesis.extra.numpy import ( arrays, array_shapes, scalar_dtypes, nested_dtypes, ) _global_type_lookup.update( { np.dtype: nested_dtypes(), np.ndarray: arrays(scalar_dtypes(), array_shapes(max_dims=2)), } ) except ImportError: # pragma: no cover pass try: import typing except ImportError: # pragma: no cover pass else: _global_type_lookup.update( { typing.ByteString: st.binary(), typing.io.BinaryIO: st.builds(io.BytesIO, st.binary()), typing.io.TextIO: st.builds(io.StringIO, st.text()), typing.Reversible: st.lists(st.integers()), typing.SupportsAbs: st.complex_numbers(), typing.SupportsComplex: st.complex_numbers(), typing.SupportsFloat: st.floats(), typing.SupportsInt: st.floats(), } ) try: # These aren't present in the typing module backport. 
_global_type_lookup[typing.SupportsBytes] = st.binary() _global_type_lookup[typing.SupportsRound] = st.complex_numbers() except AttributeError: # pragma: no cover pass def register(type_, fallback=None): if isinstance(type_, str): # Use the name of generic types which are not available on all # versions, and the function just won't be added to the registry type_ = getattr(typing, type_, None) if type_ is None: # pragma: no cover return lambda f: f def inner(func): if fallback is None: _global_type_lookup[type_] = func return func @functools.wraps(func) def really_inner(thing): if getattr(thing, "__args__", None) is None: return fallback return func(thing) _global_type_lookup[type_] = really_inner return really_inner return inner @register("Type") def resolve_Type(thing): if thing.__args__ is None: return st.just(type) args = (thing.__args__[0],) if getattr(args[0], "__origin__", None) is typing.Union: args = args[0].__args__ elif hasattr(args[0], "__union_params__"): # pragma: no cover args = args[0].__union_params__ if isinstance(ForwardRef, type): # pragma: no cover # Duplicate check from from_type here - only paying when needed. for a in args: if type(a) == ForwardRef: raise ResolutionFailed( "thing=%s cannot be resolved. Upgrading to " "python>=3.6 may fix this problem via improvements " "to the typing module." 
% (thing,) ) return st.sampled_from(sorted(args, key=type_sorting_key)) @register(typing.List, st.builds(list)) def resolve_List(thing): return st.lists(st.from_type(thing.__args__[0])) @register(typing.Set, st.builds(set)) def resolve_Set(thing): return st.sets(st.from_type(thing.__args__[0])) @register(typing.FrozenSet, st.builds(frozenset)) def resolve_FrozenSet(thing): return st.frozensets(st.from_type(thing.__args__[0])) @register(typing.Dict, st.builds(dict)) def resolve_Dict(thing): # If thing is a Collection instance, we need to fill in the values keys_vals = [st.from_type(t) for t in thing.__args__] * 2 return st.dictionaries(keys_vals[0], keys_vals[1]) @register("DefaultDict", st.builds(collections.defaultdict)) def resolve_DefaultDict(thing): return resolve_Dict(thing).map(lambda d: collections.defaultdict(None, d)) @register(typing.ItemsView, st.builds(dict).map(dict.items)) def resolve_ItemsView(thing): return resolve_Dict(thing).map(dict.items) @register(typing.KeysView, st.builds(dict).map(dict.keys)) def resolve_KeysView(thing): return st.dictionaries(st.from_type(thing.__args__[0]), st.none()).map( dict.keys ) @register(typing.ValuesView, st.builds(dict).map(dict.values)) def resolve_ValuesView(thing): return st.dictionaries(st.integers(), st.from_type(thing.__args__[0])).map( dict.values ) @register(typing.Iterator, st.iterables(st.nothing())) def resolve_Iterator(thing): return st.iterables(st.from_type(thing.__args__[0])) @register(typing.Callable, st.functions()) def resolve_Callable(thing): # Generated functions either accept no arguments, or arbitrary arguments. # This is looser than ideal, but anything tighter would generally break # use of keyword arguments and we'd rather not force positional-only. 
if not thing.__args__: # pragma: no cover # varies by minor version return st.functions() return st.functions( like=(lambda: None) if len(thing.__args__) == 1 else (lambda *a, **k: None), returns=st.from_type(thing.__args__[-1]), ) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/stateful.py000066400000000000000000000703401354103617500304370ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """This module provides support for a stateful style of testing, where tests attempt to find a sequence of operations that cause a breakage rather than just a single value. Notably, the set of steps available at any point may depend on the execution to date. 
""" from __future__ import absolute_import, division, print_function import inspect from copy import copy from unittest import TestCase import attr import hypothesis.internal.conjecture.utils as cu import hypothesis.strategies as st from hypothesis._settings import ( HealthCheck, Verbosity, note_deprecation, settings as Settings, ) from hypothesis.control import current_build_context from hypothesis.core import given from hypothesis.errors import HypothesisException, InvalidArgument, InvalidDefinition from hypothesis.internal.compat import quiet_raise, string_types from hypothesis.internal.reflection import function_digest, nicerepr, proxies from hypothesis.internal.validation import check_type from hypothesis.reporting import current_verbosity, report from hypothesis.searchstrategy.strategies import OneOfStrategy, SearchStrategy from hypothesis.vendor.pretty import CUnicodeIO, RepresentationPrinter STATE_MACHINE_RUN_LABEL = cu.calc_label_from_name("another state machine step") if False: from typing import Any, Dict, List, Text # noqa class TestCaseProperty(object): # pragma: no cover def __get__(self, obj, typ=None): if obj is not None: typ = type(obj) return typ._to_test_case() def __set__(self, obj, value): raise AttributeError(u"Cannot set TestCase") def __delete__(self, obj): raise AttributeError(u"Cannot delete TestCase") def run_state_machine_as_test(state_machine_factory, settings=None): """Run a state machine definition as a test, either silently doing nothing or printing a minimal breaking program and raising an exception. state_machine_factory is anything which returns an instance of GenericStateMachine when called with no arguments - it can be a class or a function. settings will be used to control the execution of the test. 
""" if settings is None: try: settings = state_machine_factory.TestCase.settings check_type(Settings, settings, "state_machine_factory.TestCase.settings") except AttributeError: settings = Settings(deadline=None, suppress_health_check=HealthCheck.all()) check_type(Settings, settings, "settings") @settings @given(st.data()) def run_state_machine(factory, data): machine = factory() if isinstance(machine, GenericStateMachine) and not isinstance( machine, RuleBasedStateMachine ): note_deprecation( "%s inherits from GenericStateMachine, which is deprecated. Use a " "RuleBasedStateMachine, or a test function with st.data(), instead." % (type(machine).__name__,), since="2019-05-29", ) else: check_type(RuleBasedStateMachine, machine, "state_machine_factory()") data.conjecture_data.hypothesis_runner = machine n_steps = settings.stateful_step_count should_continue = cu.many( data.conjecture_data, min_size=1, max_size=n_steps, average_size=n_steps ) print_steps = ( current_build_context().is_final or current_verbosity() >= Verbosity.debug ) try: if print_steps: machine.print_start() machine.check_invariants() while should_continue.more(): value = data.conjecture_data.draw(machine.steps()) if print_steps: machine.print_step(value) machine.execute_step(value) machine.check_invariants() finally: if print_steps: machine.print_end() machine.teardown() # Use a machine digest to identify stateful tests in the example database run_state_machine.hypothesis.inner_test._hypothesis_internal_add_digest = function_digest( state_machine_factory ) # Copy some attributes so @seed and @reproduce_failure "just work" run_state_machine._hypothesis_internal_use_seed = getattr( state_machine_factory, "_hypothesis_internal_use_seed", None ) run_state_machine._hypothesis_internal_use_reproduce_failure = getattr( state_machine_factory, "_hypothesis_internal_use_reproduce_failure", None ) run_state_machine(state_machine_factory) class GenericStateMachineMeta(type): def __init__(self, *args, **kwargs): 
super(GenericStateMachineMeta, self).__init__(*args, **kwargs) def __setattr__(self, name, value): if name == "settings" and isinstance(value, Settings): raise AttributeError( ( "Assigning {cls}.settings = {value} does nothing. Assign " "to {cls}.TestCase.settings, or use @{value} as a decorator " "on the {cls} class." ).format(cls=self.__name__, value=value) ) return type.__setattr__(self, name, value) class GenericStateMachine( GenericStateMachineMeta("GenericStateMachine", (object,), {}) # type: ignore ): """A GenericStateMachine is a deprecated approach to stateful testing. In earlier versions of Hypothesis, you would define ``steps``, ``execute_step``, ``teardown``, and ``check_invariants`` methods; and the engine would then run something like the following:: @given(st.data()) def test_the_stateful_thing(data): x = MyStatemachineSubclass() x.check_invariants() try: for _ in range(50): step = data.draw(x.steps()) x.execute_step(step) x.check_invariants() finally: x.teardown() We now recommend using rule-based stateful testing instead wherever possible. If your test is better expressed in the above format than as a rule-based state machine, we suggest "unrolling" your method definitions into a simple test function with the above control flow. """ def steps(self): """Return a SearchStrategy instance the defines the available next steps.""" raise NotImplementedError(u"%r.steps()" % (self,)) def execute_step(self, step): """Execute a step that has been previously drawn from self.steps()""" raise NotImplementedError(u"%r.execute_step()" % (self,)) def print_start(self): """Called right at the start of printing. By default does nothing. """ def print_end(self): """Called right at the end of printing. By default does nothing. """ def print_step(self, step): """Print a step to the current reporter. This is called right before a step is executed. 
""" self.step_count = getattr(self, u"step_count", 0) + 1 report(u"Step #%d: %s" % (self.step_count, nicerepr(step))) def teardown(self): """Called after a run has finished executing to clean up any necessary state. Does nothing by default. """ def check_invariants(self): """Called after initializing and after executing each step.""" _test_case_cache = {} # type: dict TestCase = TestCaseProperty() @classmethod def _to_test_case(state_machine_class): try: return state_machine_class._test_case_cache[state_machine_class] except KeyError: pass class StateMachineTestCase(TestCase): settings = Settings(deadline=None, suppress_health_check=HealthCheck.all()) # We define this outside of the class and assign it because you can't # assign attributes to instance method values in Python 2 def runTest(self): run_state_machine_as_test(state_machine_class) runTest.is_hypothesis_test = True StateMachineTestCase.runTest = runTest base_name = state_machine_class.__name__ StateMachineTestCase.__name__ = str(base_name + u".TestCase") StateMachineTestCase.__qualname__ = str( getattr(state_machine_class, u"__qualname__", base_name) + u".TestCase" ) state_machine_class._test_case_cache[state_machine_class] = StateMachineTestCase return StateMachineTestCase @attr.s() class Rule(object): targets = attr.ib() function = attr.ib() arguments = attr.ib() precondition = attr.ib() bundles = attr.ib(init=False) def __attrs_post_init__(self): arguments = {} bundles = [] for k, v in sorted(self.arguments.items()): assert not isinstance(v, BundleReferenceStrategy) if isinstance(v, Bundle): bundles.append(v) consume = isinstance(v, BundleConsumer) arguments[k] = BundleReferenceStrategy(v.name, consume) else: arguments[k] = v self.bundles = tuple(bundles) self.arguments_strategy = st.fixed_dictionaries(arguments) self_strategy = st.runner() class BundleReferenceStrategy(SearchStrategy): def __init__(self, name, consume=False): self.name = name self.consume = consume def do_draw(self, data): machine = 
data.draw(self_strategy) bundle = machine.bundle(self.name) if not bundle: data.mark_invalid() # Shrink towards the right rather than the left. This makes it easier # to delete data generated earlier, as when the error is towards the # end there can be a lot of hard to remove padding. position = cu.integer_range(data, 0, len(bundle) - 1, center=len(bundle)) if self.consume: return bundle.pop(position) else: return bundle[position] class Bundle(SearchStrategy): def __init__(self, name, consume=False): self.name = name self.__reference_strategy = BundleReferenceStrategy(name, consume) def do_draw(self, data): machine = data.draw(self_strategy) reference = data.draw(self.__reference_strategy) return machine.names_to_values[reference.name] class BundleConsumer(Bundle): def __init__(self, bundle): super(BundleConsumer, self).__init__(bundle.name, consume=True) def consumes(bundle): """When introducing a rule in a RuleBasedStateMachine, this function can be used to mark bundles from which each value used in a step with the given rule should be removed. This function returns a strategy object that can be manipulated and combined like any other. For example, a rule declared with ``@rule(value1=b1, value2=consumes(b2), value3=lists(consumes(b3)))`` will consume a value from Bundle ``b2`` and several values from Bundle ``b3`` to populate ``value2`` and ``value3`` each time it is executed. """ if not isinstance(bundle, Bundle): raise TypeError("Argument to be consumed must be a bundle.") return BundleConsumer(bundle) @attr.s() class MultipleResults(object): values = attr.ib() def multiple(*args): """This function can be used to pass multiple results to the target(s) of a rule. Just use ``return multiple(result1, result2, ...)`` in your rule. It is also possible to use ``return multiple()`` with no arguments in order to end a rule without passing any result. 
""" return MultipleResults(args) def _convert_targets(targets, target): """Single validator and convertor for target arguments.""" if target is not None: if targets: note_deprecation( "Passing both targets=%r and target=%r is redundant, and " "will become an error in a future version of Hypothesis. " "Pass targets=%r instead." % (targets, target, tuple(targets) + (target,)), since="2018-08-18", ) targets = tuple(targets) + (target,) converted_targets = [] for t in targets: if isinstance(t, string_types): note_deprecation( "Got %r as a target, but passing the name of a Bundle is " "deprecated - please pass the Bundle directly." % (t,), since="2018-08-18", ) elif not isinstance(t, Bundle): msg = ( "Got invalid target %r of type %r, but all targets must " "be either a Bundle or the name of a Bundle." ) if isinstance(t, OneOfStrategy): msg += ( "\nIt looks like you passed `one_of(a, b)` or `a | b` as " "a target. You should instead pass `targets=(a, b)` to " "add the return value of this rule to both the `a` and " "`b` bundles, or define a rule for each target if it " "should be added to exactly one." ) raise InvalidArgument(msg % (t, type(t))) while isinstance(t, Bundle): t = t.name converted_targets.append(t) return tuple(converted_targets) RULE_MARKER = u"hypothesis_stateful_rule" INITIALIZE_RULE_MARKER = u"hypothesis_stateful_initialize_rule" PRECONDITION_MARKER = u"hypothesis_stateful_precondition" INVARIANT_MARKER = u"hypothesis_stateful_invariant" def rule(targets=(), target=None, **kwargs): """Decorator for RuleBasedStateMachine. Any name present in target or targets will define where the end result of this function should go. If both are empty then the end result will be discarded. ``target`` must be a Bundle, or if the result should go to multiple bundles you can pass a tuple of them as the ``targets`` argument. It is invalid to use both arguments for a single rule. 
If the result should go to exactly one of several bundles, define a separate rule for each case. kwargs then define the arguments that will be passed to the function invocation. If their value is a Bundle, or if it is ``consumes(b)`` where ``b`` is a Bundle, then values that have previously been produced for that bundle will be provided. If ``consumes`` is used, the value will also be removed from the bundle. Any other kwargs should be strategies and values from them will be provided. """ converted_targets = _convert_targets(targets, target) def accept(f): existing_rule = getattr(f, RULE_MARKER, None) existing_initialize_rule = getattr(f, INITIALIZE_RULE_MARKER, None) if existing_rule is not None or existing_initialize_rule is not None: raise InvalidDefinition( "A function cannot be used for two distinct rules. ", Settings.default ) precondition = getattr(f, PRECONDITION_MARKER, None) rule = Rule( targets=converted_targets, arguments=kwargs, function=f, precondition=precondition, ) @proxies(f) def rule_wrapper(*args, **kwargs): return f(*args, **kwargs) setattr(rule_wrapper, RULE_MARKER, rule) return rule_wrapper return accept def initialize(targets=(), target=None, **kwargs): """Decorator for RuleBasedStateMachine. An initialize decorator behaves like a rule, but the decorated method is called at most once in a run. All initialize decorated methods will be called before any rule decorated methods, in an arbitrary order. """ converted_targets = _convert_targets(targets, target) def accept(f): existing_rule = getattr(f, RULE_MARKER, None) existing_initialize_rule = getattr(f, INITIALIZE_RULE_MARKER, None) if existing_rule is not None or existing_initialize_rule is not None: raise InvalidDefinition( "A function cannot be used for two distinct rules. ", Settings.default ) precondition = getattr(f, PRECONDITION_MARKER, None) if precondition: raise InvalidDefinition( "An initialization rule cannot have a precondition. 
", Settings.default ) rule = Rule( targets=converted_targets, arguments=kwargs, function=f, precondition=precondition, ) @proxies(f) def rule_wrapper(*args, **kwargs): return f(*args, **kwargs) setattr(rule_wrapper, INITIALIZE_RULE_MARKER, rule) return rule_wrapper return accept @attr.s() class VarReference(object): name = attr.ib() def precondition(precond): """Decorator to apply a precondition for rules in a RuleBasedStateMachine. Specifies a precondition for a rule to be considered as a valid step in the state machine. The given function will be called with the instance of RuleBasedStateMachine and should return True or False. Usually it will need to look at attributes on that instance. For example:: class MyTestMachine(RuleBasedStateMachine): state = 1 @precondition(lambda self: self.state != 0) @rule(numerator=integers()) def divide_with(self, numerator): self.state = numerator / self.state This is better than using assume in your rule since more valid rules should be able to be run. """ def decorator(f): @proxies(f) def precondition_wrapper(*args, **kwargs): return f(*args, **kwargs) existing_initialize_rule = getattr(f, INITIALIZE_RULE_MARKER, None) if existing_initialize_rule is not None: raise InvalidDefinition( "An initialization rule cannot have a precondition. 
", Settings.default ) rule = getattr(f, RULE_MARKER, None) if rule is None: setattr(precondition_wrapper, PRECONDITION_MARKER, precond) else: new_rule = Rule( targets=rule.targets, arguments=rule.arguments, function=rule.function, precondition=precond, ) setattr(precondition_wrapper, RULE_MARKER, new_rule) invariant = getattr(f, INVARIANT_MARKER, None) if invariant is not None: new_invariant = Invariant(function=invariant.function, precondition=precond) setattr(precondition_wrapper, INVARIANT_MARKER, new_invariant) return precondition_wrapper return decorator @attr.s() class Invariant(object): function = attr.ib() precondition = attr.ib() def invariant(): """Decorator to apply an invariant for rules in a RuleBasedStateMachine. The decorated function will be run after every rule and can raise an exception to indicate failed invariants. For example:: class MyTestMachine(RuleBasedStateMachine): state = 1 @invariant() def is_nonzero(self): assert self.state != 0 """ def accept(f): existing_invariant = getattr(f, INVARIANT_MARKER, None) if existing_invariant is not None: raise InvalidDefinition( "A function cannot be used for two distinct invariants.", Settings.default, ) precondition = getattr(f, PRECONDITION_MARKER, None) rule = Invariant(function=f, precondition=precondition) @proxies(f) def invariant_wrapper(*args, **kwargs): return f(*args, **kwargs) setattr(invariant_wrapper, INVARIANT_MARKER, rule) return invariant_wrapper return accept LOOP_LABEL = cu.calc_label_from_name("RuleStrategy loop iteration") class RuleStrategy(SearchStrategy): def __init__(self, machine): SearchStrategy.__init__(self) self.machine = machine self.rules = list(machine.rules()) # The order is a bit arbitrary. Primarily we're trying to group rules # that write to the same location together, and to put rules with no # target first as they have less effect on the structure. We order from # fewer to more arguments on grounds that it will plausibly need less # data. 
This probably won't work especially well and we could be # smarter about it, but it's better than just doing it in definition # order. self.rules.sort( key=lambda rule: ( sorted(rule.targets), len(rule.arguments), rule.function.__name__, ) ) def do_draw(self, data): try: rule = data.draw(st.sampled_from(self.rules).filter(self.is_valid)) except HypothesisException: # FailedHealthCheck or UnsatisfiedAssumption depending on user settings. msg = u"No progress can be made from state %r" % (self.machine,) quiet_raise(InvalidDefinition(msg)) return (rule, data.draw(rule.arguments_strategy)) def is_valid(self, rule): if rule.precondition and not rule.precondition(self.machine): return False for b in rule.bundles: bundle = self.machine.bundle(b.name) if not bundle: return False return True class RuleBasedStateMachine(GenericStateMachine): """A RuleBasedStateMachine gives you a more structured way to define state machines. The idea is that a state machine carries a bunch of types of data divided into Bundles, and has a set of rules which may read data from bundles (or just from normal strategies) and push data onto bundles. At any given point a random applicable rule will be executed. 
""" _rules_per_class = {} # type: Dict[type, List[classmethod]] _invariants_per_class = {} # type: Dict[type, List[classmethod]] _base_rules_per_class = {} # type: Dict[type, List[classmethod]] _initializers_per_class = {} # type: Dict[type, List[classmethod]] _base_initializers_per_class = {} # type: Dict[type, List[classmethod]] def __init__(self): if not self.rules(): raise InvalidDefinition( u"Type %s defines no rules" % (type(self).__name__,) ) self.bundles = {} # type: Dict[Text, list] self.name_counter = 1 self.names_to_values = {} # type: Dict[Text, Any] self.__stream = CUnicodeIO() self.__printer = RepresentationPrinter(self.__stream) self._initialize_rules_to_run = copy(self.initialize_rules()) self.__rules_strategy = RuleStrategy(self) def __pretty(self, value): if isinstance(value, VarReference): return value.name self.__stream.seek(0) self.__stream.truncate(0) self.__printer.output_width = 0 self.__printer.buffer_width = 0 self.__printer.buffer.clear() self.__printer.pretty(value) self.__printer.flush() return self.__stream.getvalue() def __repr__(self): return u"%s(%s)" % (type(self).__name__, nicerepr(self.bundles)) def upcoming_name(self): return u"v%d" % (self.name_counter,) def new_name(self): result = self.upcoming_name() self.name_counter += 1 return result def bundle(self, name): return self.bundles.setdefault(name, []) @classmethod def initialize_rules(cls): try: return cls._initializers_per_class[cls] except KeyError: pass for k, v in inspect.getmembers(cls): r = getattr(v, INITIALIZE_RULE_MARKER, None) if r is not None: cls.define_initialize_rule( r.targets, r.function, r.arguments, r.precondition ) cls._initializers_per_class[cls] = cls._base_initializers_per_class.pop(cls, []) return cls._initializers_per_class[cls] @classmethod def rules(cls): try: return cls._rules_per_class[cls] except KeyError: pass for k, v in inspect.getmembers(cls): r = getattr(v, RULE_MARKER, None) if r is not None: cls.define_rule(r.targets, r.function, 
r.arguments, r.precondition) cls._rules_per_class[cls] = cls._base_rules_per_class.pop(cls, []) return cls._rules_per_class[cls] @classmethod def invariants(cls): try: return cls._invariants_per_class[cls] except KeyError: pass target = [] for k, v in inspect.getmembers(cls): i = getattr(v, INVARIANT_MARKER, None) if i is not None: target.append(i) cls._invariants_per_class[cls] = target return cls._invariants_per_class[cls] @classmethod def define_initialize_rule(cls, targets, function, arguments, precondition=None): converted_arguments = {} for k, v in arguments.items(): converted_arguments[k] = v if cls in cls._initializers_per_class: target = cls._initializers_per_class[cls] else: target = cls._base_initializers_per_class.setdefault(cls, []) return target.append(Rule(targets, function, converted_arguments, precondition)) @classmethod def define_rule(cls, targets, function, arguments, precondition=None): converted_arguments = {} for k, v in arguments.items(): converted_arguments[k] = v if cls in cls._rules_per_class: target = cls._rules_per_class[cls] else: target = cls._base_rules_per_class.setdefault(cls, []) return target.append(Rule(targets, function, converted_arguments, precondition)) def steps(self): # Pick initialize rules first if self._initialize_rules_to_run: return st.one_of( [ st.tuples(st.just(rule), st.fixed_dictionaries(rule.arguments)) for rule in self._initialize_rules_to_run ] ) return self.__rules_strategy def print_start(self): report(u"state = %s()" % (self.__class__.__name__,)) def print_end(self): report(u"state.teardown()") def print_step(self, step): rule, data = step data_repr = {} for k, v in data.items(): data_repr[k] = self.__pretty(v) self.step_count = getattr(self, u"step_count", 0) + 1 report( u"%sstate.%s(%s)" % ( u"%s = " % (self.upcoming_name(),) if rule.targets else u"", rule.function.__name__, u", ".join(u"%s=%s" % kv for kv in data_repr.items()), ) ) def _add_result_to_targets(self, targets, result): name = self.new_name() 
self.__printer.singleton_pprinters.setdefault( id(result), lambda obj, p, cycle: p.text(name) ) self.names_to_values[name] = result for target in targets: self.bundle(target).append(VarReference(name)) def execute_step(self, step): rule, data = step data = dict(data) for k, v in list(data.items()): if isinstance(v, VarReference): data[k] = self.names_to_values[v.name] result = rule.function(self, **data) if rule.targets: if isinstance(result, MultipleResults): for single_result in result.values: self._add_result_to_targets(rule.targets, single_result) else: self._add_result_to_targets(rule.targets, result) if self._initialize_rules_to_run: self._initialize_rules_to_run.remove(rule) def check_invariants(self): for invar in self.invariants(): if invar.precondition and not invar.precondition(self): continue invar.function(self) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/statistics.py000066400000000000000000000112731354103617500310020ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import math from hypothesis.internal.conjecture.data import Status from hypothesis.internal.conjecture.engine import MAX_SHRINKS, ExitReason from hypothesis.utils.dynamicvariables import DynamicVariable collector = DynamicVariable(None) class Statistics(object): def __init__(self, engine): self.passing_examples = len(engine.status_runtimes.get(Status.VALID, ())) self.invalid_examples = len( engine.status_runtimes.get(Status.INVALID, []) + engine.status_runtimes.get(Status.OVERRUN, []) ) self.failing_examples = len(engine.status_runtimes.get(Status.INTERESTING, ())) runtimes = sorted( engine.status_runtimes.get(Status.VALID, []) + engine.status_runtimes.get(Status.INVALID, []) + engine.status_runtimes.get(Status.INTERESTING, []) ) self.has_runs = bool(runtimes) if not self.has_runs: return n = max(0, len(runtimes) - 1) lower = int(runtimes[int(math.floor(n * 0.05))] * 1000) upper = int(runtimes[int(math.ceil(n * 0.95))] * 1000) if upper == 0: self.runtimes = "< 1ms" elif lower == upper: self.runtimes = "~ %dms" % (lower,) else: self.runtimes = "%d-%d ms" % (lower, upper) if engine.exit_reason == ExitReason.finished: self.exit_reason = "nothing left to do" elif engine.exit_reason == ExitReason.flaky: self.exit_reason = "test was flaky" elif engine.exit_reason == ExitReason.max_shrinks: self.exit_reason = "shrunk example %s times" % (MAX_SHRINKS,) elif engine.exit_reason == ExitReason.max_iterations: self.exit_reason = ( "settings.max_examples={}, but < 10% of examples satisfied " "assumptions" ).format(engine.settings.max_examples) else: self.exit_reason = "settings.%s=%r" % ( engine.exit_reason.name, getattr(engine.settings, engine.exit_reason.name), ) self.events = [ "%6.2f%%, %s" % (c / engine.call_count * 100, e) for e, c in sorted( engine.event_call_counts.items(), key=lambda x: (-x[1], x[0]) ) ] total_runtime = math.fsum(engine.all_runtimes) total_drawtime = 
math.fsum(engine.all_drawtimes) if total_drawtime == 0.0 and total_runtime >= 0.0: self.draw_time_percentage = "~ 0%" elif total_drawtime < 0.0 or total_runtime <= 0.0: # This weird condition is possible in two ways: # 1. drawtime and/or runtime are negative, due to clock changes # on Python 2 or old OSs (we use monotonic() where available) # 2. floating-point issues *very rarely* cause math.fsum to be # off by the lowest bit, so drawtime==0 and runtime!=0, eek! self.draw_time_percentage = "NaN" else: draw_time_percentage = 100.0 * min(1, total_drawtime / total_runtime) self.draw_time_percentage = "~ %d%%" % (round(draw_time_percentage),) def get_description(self): """Return a list of lines describing the statistics, to be printed.""" if not self.has_runs: return [" - Test was never run"] lines = [ " - %d passing examples, %d failing examples, %d invalid examples" % (self.passing_examples, self.failing_examples, self.invalid_examples), " - Typical runtimes: %s" % (self.runtimes,), " - Fraction of time spent in data generation: %s" % (self.draw_time_percentage,), " - Stopped because %s" % (self.exit_reason,), ] if self.events: lines.append(" - Events:") lines += [" * %s" % (event,) for event in self.events] return lines def note_engine_for_statistics(engine): callback = collector.value if callback is not None: callback(Statistics(engine)) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/strategies.py000066400000000000000000000051061354103617500307600ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis._strategies import ( DataObject, _strategies, binary, booleans, builds, characters, complex_numbers, composite, data, dates, datetimes, decimals, deferred, dictionaries, emails, fixed_dictionaries, floats, fractions, from_regex, from_type, frozensets, functions, integers, iterables, just, lists, none, nothing, one_of, permutations, random_module, randoms, recursive, register_type_strategy, runner, sampled_from, sets, shared, slices, text, timedeltas, times, tuples, uuids, ) from hypothesis.searchstrategy import SearchStrategy # The implementation of all of these lives in `_strategies.py`, but we # re-export them via this module to avoid exposing implementation details # to over-zealous tab completion in editors that do not respect __all__. 
__all__ = [ "binary", "booleans", "builds", "characters", "complex_numbers", "composite", "data", "DataObject", "dates", "datetimes", "decimals", "deferred", "dictionaries", "emails", "fixed_dictionaries", "floats", "fractions", "from_regex", "from_type", "frozensets", "functions", "integers", "iterables", "just", "lists", "none", "nothing", "one_of", "permutations", "random_module", "randoms", "recursive", "register_type_strategy", "runner", "sampled_from", "sets", "shared", "slices", "text", "timedeltas", "times", "tuples", "uuids", "SearchStrategy", ] assert _strategies.issubset(set(__all__)), _strategies - set(__all__) del _strategies, absolute_import, division, print_function _public = {n for n in dir() if n[0] not in "_@"} assert set(__all__) == _public, set(__all__) - _public del _public hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/types.py000066400000000000000000000025111354103617500277470ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from random import Random class RandomWithSeed(Random): """A subclass of Random designed to expose the seed it was initially provided with. We consistently use this instead of Random objects because it makes examples much easier to recreate. 
""" def __init__(self, seed): super(RandomWithSeed, self).__init__(seed) self.seed = seed def __copy__(self): result = RandomWithSeed(self.seed) result.setstate(self.getstate()) return result def __deepcopy__(self, table): return self.__copy__() def __repr__(self): return u"RandomWithSeed(%s)" % (self.seed,) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/utils/000077500000000000000000000000001354103617500273725ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/utils/__init__.py000066400000000000000000000015141354103617500315040ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """hypothesis.utils is a package for things that you can consider part of the semi-public Hypothesis API but aren't really the core point.""" from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/utils/conventions.py000066400000000000000000000024751354103617500323210ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function # Notes: we use instances of these objects as singletons which serve as # identifiers in various patches of code. The more specific types # (DefaultValueType and InferType) exist so that typecheckers such as Mypy # can distinguish them from the others. DefaultValueType is only used in # the Django extra. class UniqueIdentifier(object): def __init__(self, identifier): self.identifier = identifier def __repr__(self): return self.identifier class DefaultValueType(UniqueIdentifier): pass class InferType(UniqueIdentifier): pass infer = InferType(u"infer") not_set = UniqueIdentifier(u"not_set") hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/utils/dynamicvariables.py000066400000000000000000000024041354103617500332610ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import threading from contextlib import contextmanager class DynamicVariable(object): def __init__(self, default): self.default = default self.data = threading.local() @property def value(self): return getattr(self.data, "value", self.default) @value.setter def value(self, value): setattr(self.data, "value", value) @contextmanager def with_value(self, value): old_value = self.value try: self.data.value = value yield finally: self.data.value = old_value hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/vendor/000077500000000000000000000000001354103617500275275ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/vendor/__init__.py000066400000000000000000000013031354103617500316350ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis-python # # Most of this work is copyright (C) 2013-2016 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/vendor/pretty.py000066400000000000000000000704201354103617500314330ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis-python # # Most of this work is copyright (C) 2013-2016 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER # -*- coding: utf-8 -*- """ Python advanced pretty printer. This pretty printer is intended to replace the old `pprint` python module which does not allow developers to provide their own pretty print callbacks. This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`. Example Usage ------------- To directly print the representation of an object use `pprint`:: from pretty import pprint pprint(complex_object) To get a string of the output use `pretty`:: from pretty import pretty string = pretty(complex_object) Extending --------- The pretty library allows developers to add pretty printing rules for their own objects. This process is straightforward. All you have to do is to add a `_repr_pretty_` method to your object and call the methods on the pretty printer passed:: class MyObject(object): def _repr_pretty_(self, p, cycle): ... Here is an example implementation of a `_repr_pretty_` method for a list subclass:: class MyList(list): def _repr_pretty_(self, p, cycle): if cycle: p.text('MyList(...)') else: with p.group(8, 'MyList([', '])'): for idx, item in enumerate(self): if idx: p.text(',') p.breakable() p.pretty(item) The `cycle` parameter is `True` if pretty detected a cycle. You *have* to react to that or the result is an infinite loop. `p.text()` just adds non breaking text to the output, `p.breakable()` either adds a whitespace or breaks here. If you pass it an argument it's used instead of the default space. `p.pretty` prettyprints another object using the pretty print method. The first parameter to the `group` function specifies the extra indentation of the next line. 
In this example the next item will either be on the same line (if the items are short enough) or aligned with the right edge of the opening bracket of `MyList`. If you just want to indent something you can use the group function without open / close parameters. You can also use this code:: with p.indent(2): ... Inheritance diagram: .. inheritance-diagram:: IPython.lib.pretty :parts: 3 :copyright: 2007 by Armin Ronacher. Portions (c) 2009 by Robert Kern. :license: BSD License. """ from __future__ import absolute_import, division, print_function import datetime import platform import re import sys import types from collections import deque from contextlib import contextmanager from io import StringIO from hypothesis.internal.compat import PY3, cast_unicode, get_stream_enc, string_types __all__ = [ "pretty", "pprint", "PrettyPrinter", "RepresentationPrinter", "for_type_by_name", ] MAX_SEQ_LENGTH = 1000 _re_pattern_type = type(re.compile("")) PYPY = platform.python_implementation() == "PyPy" def _safe_getattr(obj, attr, default=None): """Safe version of getattr. Same as getattr, but will return ``default`` on any Exception, rather than raising. 
""" try: return getattr(obj, attr, default) except Exception: return default if PY3: CUnicodeIO = StringIO else: # pragma: no cover class CUnicodeIO(StringIO): """StringIO that casts str to unicode on Python 2.""" def write(self, text): return super(CUnicodeIO, self).write( cast_unicode(text, encoding=get_stream_enc(sys.stdout)) ) def pretty( obj, verbose=False, max_width=79, newline="\n", max_seq_length=MAX_SEQ_LENGTH ): """Pretty print the object's representation.""" stream = CUnicodeIO() printer = RepresentationPrinter( stream, verbose, max_width, newline, max_seq_length=max_seq_length ) printer.pretty(obj) printer.flush() return stream.getvalue() def pprint( obj, verbose=False, max_width=79, newline="\n", max_seq_length=MAX_SEQ_LENGTH ): """Like `pretty` but print to stdout.""" printer = RepresentationPrinter( sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length ) printer.pretty(obj) printer.flush() sys.stdout.write(newline) sys.stdout.flush() class _PrettyPrinterBase(object): @contextmanager def indent(self, indent): """with statement support for indenting/dedenting.""" self.indentation += indent try: yield finally: self.indentation -= indent @contextmanager def group(self, indent=0, open="", close=""): """like begin_group / end_group but for the with statement.""" self.begin_group(indent, open) try: yield finally: self.end_group(indent, close) class PrettyPrinter(_PrettyPrinterBase): """Baseclass for the `RepresentationPrinter` prettyprinter that is used to generate pretty reprs of objects. Contrary to the `RepresentationPrinter` this printer knows nothing about the default pprinters or the `_repr_pretty_` callback method. 
""" def __init__( self, output, max_width=79, newline="\n", max_seq_length=MAX_SEQ_LENGTH ): self.broken = False self.output = output self.max_width = max_width self.newline = newline self.max_seq_length = max_seq_length self.output_width = 0 self.buffer_width = 0 self.buffer = deque() root_group = Group(0) self.group_stack = [root_group] self.group_queue = GroupQueue(root_group) self.indentation = 0 def _break_outer_groups(self): while self.max_width < self.output_width + self.buffer_width: group = self.group_queue.deq() if not group: return while group.breakables: x = self.buffer.popleft() self.output_width = x.output(self.output, self.output_width) self.buffer_width -= x.width while self.buffer and isinstance(self.buffer[0], Text): x = self.buffer.popleft() self.output_width = x.output(self.output, self.output_width) self.buffer_width -= x.width def text(self, obj): """Add literal text to the output.""" width = len(obj) if self.buffer: text = self.buffer[-1] if not isinstance(text, Text): text = Text() self.buffer.append(text) text.add(obj, width) self.buffer_width += width self._break_outer_groups() else: self.output.write(obj) self.output_width += width def breakable(self, sep=" "): """Add a breakable separator to the output. This does not mean that it will automatically break here. If no breaking on this position takes place the `sep` is inserted which default to one space. 
""" width = len(sep) group = self.group_stack[-1] if group.want_break: self.flush() self.output.write(self.newline) self.output.write(" " * self.indentation) self.output_width = self.indentation self.buffer_width = 0 else: self.buffer.append(Breakable(sep, width, self)) self.buffer_width += width self._break_outer_groups() def break_(self): """Explicitly insert a newline into the output, maintaining correct indentation.""" self.flush() self.output.write(self.newline) self.output.write(" " * self.indentation) self.output_width = self.indentation self.buffer_width = 0 def begin_group(self, indent=0, open=""): """ Begin a group. If you want support for python < 2.5 which doesn't has the with statement this is the preferred way: p.begin_group(1, '{') ... p.end_group(1, '}') The python 2.5 expression would be this: with p.group(1, '{', '}'): ... The first parameter specifies the indentation for the next line ( usually the width of the opening text), the second the opening text. All parameters are optional. """ if open: self.text(open) group = Group(self.group_stack[-1].depth + 1) self.group_stack.append(group) self.group_queue.enq(group) self.indentation += indent def _enumerate(self, seq): """like enumerate, but with an upper limit on the number of items.""" for idx, x in enumerate(seq): if self.max_seq_length and idx >= self.max_seq_length: self.text(",") self.breakable() self.text("...") return yield idx, x def end_group(self, dedent=0, close=""): """End a group. See `begin_group` for more details. 
""" self.indentation -= dedent group = self.group_stack.pop() if not group.breakables: self.group_queue.remove(group) if close: self.text(close) def flush(self): """Flush data that is left in the buffer.""" for data in self.buffer: self.output_width += data.output(self.output, self.output_width) self.buffer.clear() self.buffer_width = 0 def _get_mro(obj_class): """Get a reasonable method resolution order of a class and its superclasses for both old-style and new-style classes.""" if not hasattr(obj_class, "__mro__"): # pragma: no cover # Old-style class. Mix in object to make a fake new-style class. try: obj_class = type(obj_class.__name__, (obj_class, object), {}) except TypeError: # Old-style extension type that does not descend from object. # FIXME: try to construct a more thorough MRO. mro = [obj_class] else: mro = obj_class.__mro__[1:-1] else: mro = obj_class.__mro__ return mro class RepresentationPrinter(PrettyPrinter): """Special pretty printer that has a `pretty` method that calls the pretty printer for a python object. This class stores processing data on `self` so you must *never* use this class in a threaded environment. Always lock it or reinstanciate it. Instances also have a verbose flag callbacks can access to control their output. For example the default instance repr prints all attributes and methods that are not prefixed by an underscore if the printer is in verbose mode. 
""" def __init__( self, output, verbose=False, max_width=79, newline="\n", singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None, max_seq_length=MAX_SEQ_LENGTH, ): PrettyPrinter.__init__( self, output, max_width, newline, max_seq_length=max_seq_length ) self.verbose = verbose self.stack = [] if singleton_pprinters is None: singleton_pprinters = _singleton_pprinters.copy() self.singleton_pprinters = singleton_pprinters if type_pprinters is None: type_pprinters = _type_pprinters.copy() self.type_pprinters = type_pprinters if deferred_pprinters is None: deferred_pprinters = _deferred_type_pprinters.copy() self.deferred_pprinters = deferred_pprinters def pretty(self, obj): """Pretty print the given object.""" obj_id = id(obj) cycle = obj_id in self.stack self.stack.append(obj_id) self.begin_group() try: obj_class = _safe_getattr(obj, "__class__", None) or type(obj) # First try to find registered singleton printers for the type. try: printer = self.singleton_pprinters[obj_id] except (TypeError, KeyError): pass else: return printer(obj, self, cycle) # Next walk the mro and check for either: # 1) a registered printer # 2) a _repr_pretty_ method for cls in _get_mro(obj_class): if cls in self.type_pprinters: # printer registered in self.type_pprinters return self.type_pprinters[cls](obj, self, cycle) else: # deferred printer printer = self._in_deferred_types(cls) if printer is not None: return printer(obj, self, cycle) else: # Finally look for special method names. # Some objects automatically create any requested # attribute. Try to ignore most of them by checking for # callability. if "_repr_pretty_" in cls.__dict__: meth = cls._repr_pretty_ if callable(meth): return meth(obj, self, cycle) return _default_pprint(obj, self, cycle) finally: self.end_group() self.stack.pop() def _in_deferred_types(self, cls): """Check if the given class is specified in the deferred type registry. 
Returns the printer from the registry if it exists, and None if the class is not in the registry. Successful matches will be moved to the regular type registry for future use. """ mod = _safe_getattr(cls, "__module__", None) name = _safe_getattr(cls, "__name__", None) key = (mod, name) printer = None if key in self.deferred_pprinters: # Move the printer over to the regular registry. printer = self.deferred_pprinters.pop(key) self.type_pprinters[cls] = printer return printer class Printable(object): def output(self, stream, output_width): # pragma: no cover raise NotImplementedError() class Text(Printable): def __init__(self): self.objs = [] self.width = 0 def output(self, stream, output_width): for obj in self.objs: stream.write(obj) return output_width + self.width def add(self, obj, width): self.objs.append(obj) self.width += width class Breakable(Printable): def __init__(self, seq, width, pretty): self.obj = seq self.width = width self.pretty = pretty self.indentation = pretty.indentation self.group = pretty.group_stack[-1] self.group.breakables.append(self) def output(self, stream, output_width): self.group.breakables.popleft() if self.group.want_break: stream.write(self.pretty.newline) stream.write(" " * self.indentation) return self.indentation if not self.group.breakables: self.pretty.group_queue.remove(self.group) stream.write(self.obj) return output_width + self.width class Group(Printable): def __init__(self, depth): self.depth = depth self.breakables = deque() self.want_break = False class GroupQueue(object): def __init__(self, *groups): self.queue = [] for group in groups: self.enq(group) def enq(self, group): depth = group.depth while depth > len(self.queue) - 1: self.queue.append([]) self.queue[depth].append(group) def deq(self): for stack in self.queue: for idx, group in enumerate(reversed(stack)): if group.breakables: del stack[idx] group.want_break = True return group for group in stack: group.want_break = True del stack[:] def remove(self, group): 
try: self.queue[group.depth].remove(group) except ValueError: pass try: _baseclass_reprs = (object.__repr__, types.InstanceType.__repr__) except AttributeError: # Python 3 _baseclass_reprs = (object.__repr__,) # type: ignore def _default_pprint(obj, p, cycle): """The default print function. Used if an object does not provide one and it's none of the builtin objects. """ klass = _safe_getattr(obj, "__class__", None) or type(obj) if _safe_getattr(klass, "__repr__", None) not in _baseclass_reprs: # A user-provided repr. Find newlines and replace them with p.break_() _repr_pprint(obj, p, cycle) return p.begin_group(1, "<") p.pretty(klass) p.text(" at 0x%x" % id(obj)) if cycle: p.text(" ...") elif p.verbose: first = True for key in dir(obj): if not key.startswith("_"): try: value = getattr(obj, key) except AttributeError: continue if isinstance(value, types.MethodType): continue if not first: p.text(",") p.breakable() p.text(key) p.text("=") step = len(key) + 1 p.indentation += step p.pretty(value) p.indentation -= step first = False p.end_group(1, ">") def _seq_pprinter_factory(start, end, basetype): """Factory that returns a pprint function useful for sequences. Used by the default pprint for tuples, dicts, and lists. """ def inner(obj, p, cycle): typ = type(obj) if ( basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__ ): # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: return p.text(start + "..." + end) step = len(start) p.begin_group(step, start) for idx, x in p._enumerate(obj): if idx: p.text(",") p.breakable() p.pretty(x) if len(obj) == 1 and type(obj) is tuple: # Special case for 1-item tuples. 
p.text(",") p.end_group(step, end) return inner def _set_pprinter_factory(start, end, basetype): """Factory that returns a pprint function useful for sets and frozensets.""" def inner(obj, p, cycle): typ = type(obj) if ( basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__ ): # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: return p.text(start + "..." + end) if len(obj) == 0: # Special case. p.text(basetype.__name__ + "()") else: step = len(start) p.begin_group(step, start) # Like dictionary keys, we will try to sort the items if there # aren't too many items = obj if not (p.max_seq_length and len(obj) >= p.max_seq_length): try: items = sorted(obj) except Exception: # Sometimes the items don't sort. pass for idx, x in p._enumerate(items): if idx: p.text(",") p.breakable() p.pretty(x) p.end_group(step, end) return inner def _dict_pprinter_factory(start, end, basetype=None): """Factory that returns a pprint function used by the default pprint of dicts and dict proxies.""" def inner(obj, p, cycle): typ = type(obj) if ( basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__ ): # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: return p.text("{...}") p.begin_group(1, start) keys = obj.keys() # if dict isn't large enough to be truncated, sort keys before # displaying if not (p.max_seq_length and len(obj) >= p.max_seq_length): try: keys = sorted(keys) except Exception: # Sometimes the keys don't sort. 
pass for idx, key in p._enumerate(keys): if idx: p.text(",") p.breakable() p.pretty(key) p.text(": ") p.pretty(obj[key]) p.end_group(1, end) inner.__name__ = "_dict_pprinter_factory(%r, %r, %r)" % (start, end, basetype) return inner def _super_pprint(obj, p, cycle): """The pprint for the super type.""" try: # This section works around various pypy versions that don't do # have the same attributes on super objects obj.__thisclass__ obj.__self__ except AttributeError: # pragma: no cover assert PYPY _repr_pprint(obj, p, cycle) return p.begin_group(8, "") def _re_pattern_pprint(obj, p, cycle): """The pprint function for regular expression patterns.""" p.text("re.compile(") pattern = repr(obj.pattern) if pattern[:1] in "uU": # pragma: no cover pattern = pattern[1:] prefix = "ur" else: prefix = "r" pattern = prefix + pattern.replace("\\\\", "\\") p.text(pattern) if obj.flags: p.text(",") p.breakable() done_one = False for flag in ( "TEMPLATE", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "UNICODE", "VERBOSE", "DEBUG", ): if obj.flags & getattr(re, flag): if done_one: p.text("|") p.text("re." + flag) done_one = True p.text(")") def _type_pprint(obj, p, cycle): """The pprint for classes and types.""" # Heap allocated types might not have the module attribute, # and others may set it to None. # Checks for a __repr__ override in the metaclass # != rather than is not because pypy compatibility if type(obj).__repr__ != type.__repr__: _repr_pprint(obj, p, cycle) return mod = _safe_getattr(obj, "__module__", None) try: name = obj.__qualname__ if not isinstance(name, string_types): # pragma: no cover # This can happen if the type implements __qualname__ as a property # or other descriptor in Python 2. raise Exception("Try __name__") except Exception: # pragma: no cover name = obj.__name__ if not isinstance(name, string_types): name = "" if mod in (None, "__builtin__", "builtins", "exceptions"): p.text(name) else: p.text(mod + "." 
+ name) def _repr_pprint(obj, p, cycle): """A pprint that just redirects to the normal repr function.""" # Find newlines and replace them with p.break_() output = repr(obj) for idx, output_line in enumerate(output.splitlines()): if idx: p.break_() p.text(output_line) def _function_pprint(obj, p, cycle): """Base pprint for all functions and builtin functions.""" name = _safe_getattr(obj, "__qualname__", obj.__name__) mod = obj.__module__ if mod and mod not in ("__builtin__", "builtins", "exceptions"): name = mod + "." + name p.text("" % name) def _exception_pprint(obj, p, cycle): """Base pprint for all exceptions.""" name = getattr(obj.__class__, "__qualname__", obj.__class__.__name__) if obj.__class__.__module__ not in ("exceptions", "builtins"): name = "%s.%s" % (obj.__class__.__module__, name) step = len(name) + 1 p.begin_group(step, name + "(") for idx, arg in enumerate(getattr(obj, "args", ())): if idx: p.text(",") p.breakable() p.pretty(arg) p.end_group(step, ")") #: the exception base try: _exception_base = BaseException except NameError: # pragma: no cover _exception_base = Exception # type: ignore #: printers for builtin types _type_pprinters = { int: _repr_pprint, float: _repr_pprint, str: _repr_pprint, tuple: _seq_pprinter_factory("(", ")", tuple), list: _seq_pprinter_factory("[", "]", list), dict: _dict_pprinter_factory("{", "}", dict), set: _set_pprinter_factory("{", "}", set), frozenset: _set_pprinter_factory("frozenset({", "})", frozenset), super: _super_pprint, _re_pattern_type: _re_pattern_pprint, type: _type_pprint, types.FunctionType: _function_pprint, types.BuiltinFunctionType: _function_pprint, types.MethodType: _repr_pprint, datetime.datetime: _repr_pprint, datetime.timedelta: _repr_pprint, _exception_base: _exception_pprint, } try: # pragma: no cover if types.DictProxyType != dict: _type_pprinters[types.DictProxyType] = _dict_pprinter_factory( "" ) _type_pprinters[types.ClassType] = _type_pprint _type_pprinters[types.SliceType] = _repr_pprint 
except AttributeError: # Python 3 _type_pprinters[slice] = _repr_pprint try: # pragma: no cover _type_pprinters[xrange] = _repr_pprint # type: ignore _type_pprinters[long] = _repr_pprint # type: ignore _type_pprinters[unicode] = _repr_pprint # type: ignore except NameError: _type_pprinters[range] = _repr_pprint _type_pprinters[bytes] = _repr_pprint #: printers for types specified by name _deferred_type_pprinters = {} # type: ignore def for_type_by_name(type_module, type_name, func): """Add a pretty printer for a type specified by the module and name of a type rather than the type object itself.""" key = (type_module, type_name) oldfunc = _deferred_type_pprinters.get(key, None) _deferred_type_pprinters[key] = func return oldfunc #: printers for the default singletons _singleton_pprinters = dict.fromkeys( map(id, [None, True, False, Ellipsis, NotImplemented]), _repr_pprint ) def _defaultdict_pprint(obj, p, cycle): name = obj.__class__.__name__ with p.group(len(name) + 1, name + "(", ")"): if cycle: p.text("...") else: p.pretty(obj.default_factory) p.text(",") p.breakable() p.pretty(dict(obj)) def _ordereddict_pprint(obj, p, cycle): name = obj.__class__.__name__ with p.group(len(name) + 1, name + "(", ")"): if cycle: p.text("...") elif len(obj): p.pretty(list(obj.items())) def _deque_pprint(obj, p, cycle): name = obj.__class__.__name__ with p.group(len(name) + 1, name + "(", ")"): if cycle: p.text("...") else: p.pretty(list(obj)) def _counter_pprint(obj, p, cycle): name = obj.__class__.__name__ with p.group(len(name) + 1, name + "(", ")"): if cycle: p.text("...") elif len(obj): p.pretty(dict(obj)) for_type_by_name("collections", "defaultdict", _defaultdict_pprint) for_type_by_name("collections", "OrderedDict", _ordereddict_pprint) for_type_by_name("ordereddict", "OrderedDict", _ordereddict_pprint) for_type_by_name("collections", "deque", _deque_pprint) for_type_by_name("collections", "Counter", _counter_pprint) for_type_by_name("counter", "Counter", _counter_pprint) 
for_type_by_name("_collections", "defaultdict", _defaultdict_pprint) for_type_by_name("_collections", "OrderedDict", _ordereddict_pprint) for_type_by_name("_collections", "deque", _deque_pprint) for_type_by_name("_collections", "Counter", _counter_pprint) hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/vendor/tlds-alpha-by-domain.txt000066400000000000000000000241051354103617500342000ustar00rootroot00000000000000# Version 2019080400, Last Updated Sun Aug 4 07:07:02 2019 UTC AAA AARP ABARTH ABB ABBOTT ABBVIE ABC ABLE ABOGADO ABUDHABI AC ACADEMY ACCENTURE ACCOUNTANT ACCOUNTANTS ACO ACTOR AD ADAC ADS ADULT AE AEG AERO AETNA AF AFAMILYCOMPANY AFL AFRICA AG AGAKHAN AGENCY AI AIG AIGO AIRBUS AIRFORCE AIRTEL AKDN AL ALFAROMEO ALIBABA ALIPAY ALLFINANZ ALLSTATE ALLY ALSACE ALSTOM AM AMERICANEXPRESS AMERICANFAMILY AMEX AMFAM AMICA AMSTERDAM ANALYTICS ANDROID ANQUAN ANZ AO AOL APARTMENTS APP APPLE AQ AQUARELLE AR ARAB ARAMCO ARCHI ARMY ARPA ART ARTE AS ASDA ASIA ASSOCIATES AT ATHLETA ATTORNEY AU AUCTION AUDI AUDIBLE AUDIO AUSPOST AUTHOR AUTO AUTOS AVIANCA AW AWS AX AXA AZ AZURE BA BABY BAIDU BANAMEX BANANAREPUBLIC BAND BANK BAR BARCELONA BARCLAYCARD BARCLAYS BAREFOOT BARGAINS BASEBALL BASKETBALL BAUHAUS BAYERN BB BBC BBT BBVA BCG BCN BD BE BEATS BEAUTY BEER BENTLEY BERLIN BEST BESTBUY BET BF BG BH BHARTI BI BIBLE BID BIKE BING BINGO BIO BIZ BJ BLACK BLACKFRIDAY BLOCKBUSTER BLOG BLOOMBERG BLUE BM BMS BMW BN BNPPARIBAS BO BOATS BOEHRINGER BOFA BOM BOND BOO BOOK BOOKING BOSCH BOSTIK BOSTON BOT BOUTIQUE BOX BR BRADESCO BRIDGESTONE BROADWAY BROKER BROTHER BRUSSELS BS BT BUDAPEST BUGATTI BUILD BUILDERS BUSINESS BUY BUZZ BV BW BY BZ BZH CA CAB CAFE CAL CALL CALVINKLEIN CAM CAMERA CAMP CANCERRESEARCH CANON CAPETOWN CAPITAL CAPITALONE CAR CARAVAN CARDS CARE CAREER CAREERS CARS CARTIER CASA CASE CASEIH CASH CASINO CAT CATERING CATHOLIC CBA CBN CBRE CBS CC CD CEB CENTER CEO CERN CF CFA CFD CG CH CHANEL CHANNEL CHARITY CHASE CHAT CHEAP CHINTAI CHRISTMAS CHROME 
CHRYSLER CHURCH CI CIPRIANI CIRCLE CISCO CITADEL CITI CITIC CITY CITYEATS CK CL CLAIMS CLEANING CLICK CLINIC CLINIQUE CLOTHING CLOUD CLUB CLUBMED CM CN CO COACH CODES COFFEE COLLEGE COLOGNE COM COMCAST COMMBANK COMMUNITY COMPANY COMPARE COMPUTER COMSEC CONDOS CONSTRUCTION CONSULTING CONTACT CONTRACTORS COOKING COOKINGCHANNEL COOL COOP CORSICA COUNTRY COUPON COUPONS COURSES CR CREDIT CREDITCARD CREDITUNION CRICKET CROWN CRS CRUISE CRUISES CSC CU CUISINELLA CV CW CX CY CYMRU CYOU CZ DABUR DAD DANCE DATA DATE DATING DATSUN DAY DCLK DDS DE DEAL DEALER DEALS DEGREE DELIVERY DELL DELOITTE DELTA DEMOCRAT DENTAL DENTIST DESI DESIGN DEV DHL DIAMONDS DIET DIGITAL DIRECT DIRECTORY DISCOUNT DISCOVER DISH DIY DJ DK DM DNP DO DOCS DOCTOR DODGE DOG DOMAINS DOT DOWNLOAD DRIVE DTV DUBAI DUCK DUNLOP DUNS DUPONT DURBAN DVAG DVR DZ EARTH EAT EC ECO EDEKA EDU EDUCATION EE EG EMAIL EMERCK ENERGY ENGINEER ENGINEERING ENTERPRISES EPSON EQUIPMENT ER ERICSSON ERNI ES ESQ ESTATE ESURANCE ET ETISALAT EU EUROVISION EUS EVENTS EVERBANK EXCHANGE EXPERT EXPOSED EXPRESS EXTRASPACE FAGE FAIL FAIRWINDS FAITH FAMILY FAN FANS FARM FARMERS FASHION FAST FEDEX FEEDBACK FERRARI FERRERO FI FIAT FIDELITY FIDO FILM FINAL FINANCE FINANCIAL FIRE FIRESTONE FIRMDALE FISH FISHING FIT FITNESS FJ FK FLICKR FLIGHTS FLIR FLORIST FLOWERS FLY FM FO FOO FOOD FOODNETWORK FOOTBALL FORD FOREX FORSALE FORUM FOUNDATION FOX FR FREE FRESENIUS FRL FROGANS FRONTDOOR FRONTIER FTR FUJITSU FUJIXEROX FUN FUND FURNITURE FUTBOL FYI GA GAL GALLERY GALLO GALLUP GAME GAMES GAP GARDEN GB GBIZ GD GDN GE GEA GENT GENTING GEORGE GF GG GGEE GH GI GIFT GIFTS GIVES GIVING GL GLADE GLASS GLE GLOBAL GLOBO GM GMAIL GMBH GMO GMX GN GODADDY GOLD GOLDPOINT GOLF GOO GOODYEAR GOOG GOOGLE GOP GOT GOV GP GQ GR GRAINGER GRAPHICS GRATIS GREEN GRIPE GROCERY GROUP GS GT GU GUARDIAN GUCCI GUGE GUIDE GUITARS GURU GW GY HAIR HAMBURG HANGOUT HAUS HBO HDFC HDFCBANK HEALTH HEALTHCARE HELP HELSINKI HERE HERMES HGTV HIPHOP HISAMITSU HITACHI HIV HK HKT HM HN HOCKEY 
HOLDINGS HOLIDAY HOMEDEPOT HOMEGOODS HOMES HOMESENSE HONDA HORSE HOSPITAL HOST HOSTING HOT HOTELES HOTELS HOTMAIL HOUSE HOW HR HSBC HT HU HUGHES HYATT HYUNDAI IBM ICBC ICE ICU ID IE IEEE IFM IKANO IL IM IMAMAT IMDB IMMO IMMOBILIEN IN INC INDUSTRIES INFINITI INFO ING INK INSTITUTE INSURANCE INSURE INT INTEL INTERNATIONAL INTUIT INVESTMENTS IO IPIRANGA IQ IR IRISH IS ISELECT ISMAILI IST ISTANBUL IT ITAU ITV IVECO JAGUAR JAVA JCB JCP JE JEEP JETZT JEWELRY JIO JLL JM JMP JNJ JO JOBS JOBURG JOT JOY JP JPMORGAN JPRS JUEGOS JUNIPER KAUFEN KDDI KE KERRYHOTELS KERRYLOGISTICS KERRYPROPERTIES KFH KG KH KI KIA KIM KINDER KINDLE KITCHEN KIWI KM KN KOELN KOMATSU KOSHER KP KPMG KPN KR KRD KRED KUOKGROUP KW KY KYOTO KZ LA LACAIXA LADBROKES LAMBORGHINI LAMER LANCASTER LANCIA LANCOME LAND LANDROVER LANXESS LASALLE LAT LATINO LATROBE LAW LAWYER LB LC LDS LEASE LECLERC LEFRAK LEGAL LEGO LEXUS LGBT LI LIAISON LIDL LIFE LIFEINSURANCE LIFESTYLE LIGHTING LIKE LILLY LIMITED LIMO LINCOLN LINDE LINK LIPSY LIVE LIVING LIXIL LK LLC LOAN LOANS LOCKER LOCUS LOFT LOL LONDON LOTTE LOTTO LOVE LPL LPLFINANCIAL LR LS LT LTD LTDA LU LUNDBECK LUPIN LUXE LUXURY LV LY MA MACYS MADRID MAIF MAISON MAKEUP MAN MANAGEMENT MANGO MAP MARKET MARKETING MARKETS MARRIOTT MARSHALLS MASERATI MATTEL MBA MC MCKINSEY MD ME MED MEDIA MEET MELBOURNE MEME MEMORIAL MEN MENU MERCKMSD METLIFE MG MH MIAMI MICROSOFT MIL MINI MINT MIT MITSUBISHI MK ML MLB MLS MM MMA MN MO MOBI MOBILE MOBILY MODA MOE MOI MOM MONASH MONEY MONSTER MOPAR MORMON MORTGAGE MOSCOW MOTO MOTORCYCLES MOV MOVIE MOVISTAR MP MQ MR MS MSD MT MTN MTR MU MUSEUM MUTUAL MV MW MX MY MZ NA NAB NADEX NAGOYA NAME NATIONWIDE NATURA NAVY NBA NC NE NEC NET NETBANK NETFLIX NETWORK NEUSTAR NEW NEWHOLLAND NEWS NEXT NEXTDIRECT NEXUS NF NFL NG NGO NHK NI NICO NIKE NIKON NINJA NISSAN NISSAY NL NO NOKIA NORTHWESTERNMUTUAL NORTON NOW NOWRUZ NOWTV NP NR NRA NRW NTT NU NYC NZ OBI OBSERVER OFF OFFICE OKINAWA OLAYAN OLAYANGROUP OLDNAVY OLLO OM OMEGA ONE ONG ONL ONLINE ONYOURSIDE OOO 
OPEN ORACLE ORANGE ORG ORGANIC ORIGINS OSAKA OTSUKA OTT OVH PA PAGE PANASONIC PARIS PARS PARTNERS PARTS PARTY PASSAGENS PAY PCCW PE PET PF PFIZER PG PH PHARMACY PHD PHILIPS PHONE PHOTO PHOTOGRAPHY PHOTOS PHYSIO PIAGET PICS PICTET PICTURES PID PIN PING PINK PIONEER PIZZA PK PL PLACE PLAY PLAYSTATION PLUMBING PLUS PM PN PNC POHL POKER POLITIE PORN POST PR PRAMERICA PRAXI PRESS PRIME PRO PROD PRODUCTIONS PROF PROGRESSIVE PROMO PROPERTIES PROPERTY PROTECTION PRU PRUDENTIAL PS PT PUB PW PWC PY QA QPON QUEBEC QUEST QVC RACING RADIO RAID RE READ REALESTATE REALTOR REALTY RECIPES RED REDSTONE REDUMBRELLA REHAB REISE REISEN REIT RELIANCE REN RENT RENTALS REPAIR REPORT REPUBLICAN REST RESTAURANT REVIEW REVIEWS REXROTH RICH RICHARDLI RICOH RIGHTATHOME RIL RIO RIP RMIT RO ROCHER ROCKS RODEO ROGERS ROOM RS RSVP RU RUGBY RUHR RUN RW RWE RYUKYU SA SAARLAND SAFE SAFETY SAKURA SALE SALON SAMSCLUB SAMSUNG SANDVIK SANDVIKCOROMANT SANOFI SAP SARL SAS SAVE SAXO SB SBI SBS SC SCA SCB SCHAEFFLER SCHMIDT SCHOLARSHIPS SCHOOL SCHULE SCHWARZ SCIENCE SCJOHNSON SCOR SCOT SD SE SEARCH SEAT SECURE SECURITY SEEK SELECT SENER SERVICES SES SEVEN SEW SEX SEXY SFR SG SH SHANGRILA SHARP SHAW SHELL SHIA SHIKSHA SHOES SHOP SHOPPING SHOUJI SHOW SHOWTIME SHRIRAM SI SILK SINA SINGLES SITE SJ SK SKI SKIN SKY SKYPE SL SLING SM SMART SMILE SN SNCF SO SOCCER SOCIAL SOFTBANK SOFTWARE SOHU SOLAR SOLUTIONS SONG SONY SOY SPACE SPORT SPOT SPREADBETTING SR SRL SRT SS ST STADA STAPLES STAR STATEBANK STATEFARM STC STCGROUP STOCKHOLM STORAGE STORE STREAM STUDIO STUDY STYLE SU SUCKS SUPPLIES SUPPLY SUPPORT SURF SURGERY SUZUKI SV SWATCH SWIFTCOVER SWISS SX SY SYDNEY SYMANTEC SYSTEMS SZ TAB TAIPEI TALK TAOBAO TARGET TATAMOTORS TATAR TATTOO TAX TAXI TC TCI TD TDK TEAM TECH TECHNOLOGY TEL TELEFONICA TEMASEK TENNIS TEVA TF TG TH THD THEATER THEATRE TIAA TICKETS TIENDA TIFFANY TIPS TIRES TIROL TJ TJMAXX TJX TK TKMAXX TL TM TMALL TN TO TODAY TOKYO TOOLS TOP TORAY TOSHIBA TOTAL TOURS TOWN TOYOTA TOYS TR TRADE TRADING TRAINING 
TRAVEL TRAVELCHANNEL TRAVELERS TRAVELERSINSURANCE TRUST TRV TT TUBE TUI TUNES TUSHU TV TVS TW TZ UA UBANK UBS UCONNECT UG UK UNICOM UNIVERSITY UNO UOL UPS US UY UZ VA VACATIONS VANA VANGUARD VC VE VEGAS VENTURES VERISIGN VERSICHERUNG VET VG VI VIAJES VIDEO VIG VIKING VILLAS VIN VIP VIRGIN VISA VISION VISTAPRINT VIVA VIVO VLAANDEREN VN VODKA VOLKSWAGEN VOLVO VOTE VOTING VOTO VOYAGE VU VUELOS WALES WALMART WALTER WANG WANGGOU WARMAN WATCH WATCHES WEATHER WEATHERCHANNEL WEBCAM WEBER WEBSITE WED WEDDING WEIBO WEIR WF WHOSWHO WIEN WIKI WILLIAMHILL WIN WINDOWS WINE WINNERS WME WOLTERSKLUWER WOODSIDE WORK WORKS WORLD WOW WS WTC WTF XBOX XEROX XFINITY XIHUAN XIN XN--11B4C3D XN--1CK2E1B XN--1QQW23A XN--2SCRJ9C XN--30RR7Y XN--3BST00M XN--3DS443G XN--3E0B707E XN--3HCRJ9C XN--3OQ18VL8PN36A XN--3PXU8K XN--42C2D9A XN--45BR5CYL XN--45BRJ9C XN--45Q11C XN--4GBRIM XN--54B7FTA0CC XN--55QW42G XN--55QX5D XN--5SU34J936BGSG XN--5TZM5G XN--6FRZ82G XN--6QQ986B3XL XN--80ADXHKS XN--80AO21A XN--80AQECDR1A XN--80ASEHDB XN--80ASWG XN--8Y0A063A XN--90A3AC XN--90AE XN--90AIS XN--9DBQ2A XN--9ET52U XN--9KRT00A XN--B4W605FERD XN--BCK1B9A5DRE4C XN--C1AVG XN--C2BR7G XN--CCK2B3B XN--CG4BKI XN--CLCHC0EA0B2G2A9GCD XN--CZR694B XN--CZRS0T XN--CZRU2D XN--D1ACJ3B XN--D1ALF XN--E1A4C XN--ECKVDTC9D XN--EFVY88H XN--ESTV75G XN--FCT429K XN--FHBEI XN--FIQ228C5HS XN--FIQ64B XN--FIQS8S XN--FIQZ9S XN--FJQ720A XN--FLW351E XN--FPCRJ9C3D XN--FZC2C9E2C XN--FZYS8D69UVGM XN--G2XX48C XN--GCKR3F0F XN--GECRJ9C XN--GK3AT1E XN--H2BREG3EVE XN--H2BRJ9C XN--H2BRJ9C8C XN--HXT814E XN--I1B6B1A6A2E XN--IMR513N XN--IO0A7I XN--J1AEF XN--J1AMH XN--J6W193G XN--JLQ61U9W7B XN--JVR189M XN--KCRX77D1X4A XN--KPRW13D XN--KPRY57D XN--KPU716F XN--KPUT3I XN--L1ACC XN--LGBBAT1AD8J XN--MGB9AWBF XN--MGBA3A3EJT XN--MGBA3A4F16A XN--MGBA7C0BBN0A XN--MGBAAKC7DVF XN--MGBAAM7A8H XN--MGBAB2BD XN--MGBAH1A3HJKRD XN--MGBAI9AZGQP6J XN--MGBAYH7GPA XN--MGBB9FBPOB XN--MGBBH1A XN--MGBBH1A71E XN--MGBC0A9AZCG XN--MGBCA7DZDO XN--MGBERP4A5D4AR XN--MGBGU82A 
XN--MGBI4ECEXP XN--MGBPL2FH XN--MGBT3DHD XN--MGBTX2B XN--MGBX4CD0AB XN--MIX891F XN--MK1BU44C XN--MXTQ1M XN--NGBC5AZD XN--NGBE9E0A XN--NGBRX XN--NODE XN--NQV7F XN--NQV7FS00EMA XN--NYQY26A XN--O3CW4H XN--OGBPF8FL XN--OTU796D XN--P1ACF XN--P1AI XN--PBT977C XN--PGBS0DH XN--PSSY2U XN--Q9JYB4C XN--QCKA1PMC XN--QXAM XN--RHQV96G XN--ROVU88B XN--RVC1E0AM3E XN--S9BRJ9C XN--SES554G XN--T60B56A XN--TCKWE XN--TIQ49XQYJ XN--UNUP4Y XN--VERMGENSBERATER-CTB XN--VERMGENSBERATUNG-PWB XN--VHQUV XN--VUQ861B XN--W4R85EL8FHU5DNRA XN--W4RS40L XN--WGBH1C XN--WGBL6A XN--XHQ521B XN--XKC2AL3HYE2A XN--XKC2DL3A5EE0H XN--Y9A3AQ XN--YFRO4I67O XN--YGBI2AMMX XN--ZFR164B XXX XYZ YACHTS YAHOO YAMAXUN YANDEX YE YODOBASHI YOGA YOKOHAMA YOU YOUTUBE YT YUN ZA ZAPPOS ZARA ZERO ZIP ZM ZONE ZUERICH ZW hypothesis-hypothesis-python-4.36.2/hypothesis-python/src/hypothesis/version.py000066400000000000000000000014171354103617500302740ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function __version_info__ = (4, 36, 2) __version__ = ".".join(map(str, __version_info__)) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/000077500000000000000000000000001354103617500244065ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/README.rst000066400000000000000000000113521354103617500260770ustar00rootroot00000000000000=========== Don't Panic =========== The Hypothesis test suite is large, but we've written these notes to help you out. It's aimed at contributors (new and old!) who know they need to add tests *somewhere*, but aren't sure where - or maybe need some hints on what kinds of tests might be useful. Others might just be interested in how a testing library tests itself! The very short version ====================== - To improve code coverage (eg because ``make check-coverage`` / the Travis build is failing), go to ``cover/`` - For longer / system / integration tests, look in ``nocover/`` - For tests that require an optional dependency, look in the directory named for that dependency. .. note:: If you get stuck, just ask a maintainer to help out by mentioning ``@HypothesisWorks/hypothesis-python-contributors`` on GitHub. We'd love to help - and also get feedback on how this document could be better! Some scenarios ============== **I'm adding or changing a strategy** Check for a file specific to that strategy (eg ``test_uuids.py`` for the ``uuids()`` strategy). Write tests for all invalid argument handling in ``test_direct_strategies.py``. Strategies with optional dependencies should go in ``hypothesis.extras``, and the tests in their own module (ie not in ``cover``). When you think you might be done, push and let Travis point out any failing tests or non-covered code! **I've made some internal changes** That's not very specific - you should probably refer to the test-finding tips in the next section. 
Remember that ``tests/cover`` is reasonably quick unit-test style tests - you should consider writing more intensive integration tests too, but put them in ``tests/nocover`` with the others. Finding particular tests ======================== With the sheer size and variety in this directory finding a specific thing can be tricky. Tips: - Check for filenames that are relevant to your contribution. - Use ``git grep`` to search for keywords, e.g. the name of a strategy you've changed. - Deliberately break something related to your code, and see which tests fail. - Ask a maintainer! Sometimes the structure is just arbitrary, and other tactics don't work - but we *want* to help! About each group of tests ========================= Still here? Here's a note on what to expect in each directory. ``common/`` Useful shared testing code, including test setup and a few helper functions in ``utils.py``. Also read up on `pytest `_ features such as ``mark.parametrize``, ``mark.skipif``, and ``raises`` for other functions that are often useful when writing tests. ``cover/`` The home of enough tests to get 100% branch coverage, as quickly as possible without compromising on test power. This can be an intimidating target, but it's entirely achievable and the maintainers are (still) here to help. This directory alone has around two-thirds of the tests for Hypothesis (~8k of ~12k lines of code). If you're adding or fixing tests, chances are therefore good that they're in here! ``datetime/`` Tests which depend on the ``pytz`` or ``dateutil`` packages for timezones. ``django/`` Tests for the Django extra. Includes a toy application, to give us lots of models to generate. ``lark/`` Tests for the Lark extra for context-free grammars, which depend on the ``lark-parser`` package. ``nocover/`` More expensive and longer-running tests, typically used to test trickier interactions or check for regressions in expensive bugs. Lots of tests about how values shrink, databases, compatibility, etc. 
New tests that are not required for full coverage of code branches or behaviour should also go in ``nocover``, to keep ``cover`` reasonably fast. ``numpy/`` Tests for the Numpy extra. ``pandas/`` Tests for the Pandas extra. ``py2/`` Tests that require Python 2. This is a small group, because almost all of our code and tests are also compatible with Python 3. ``py3/`` Tests that require Python 3. Includes checking that unicode identifiers and function annotations don't break anything, asyncio tests, and tests for inference from type hints. ``pytest/`` Hypothesis has excellent integration with ``pytest``, though we are careful to support other test runners including unittest and nose. This is where we test that our pytest integration is working properly. ``quality/`` Tests that various hard-to-find examples do in fact get found by Hypothesis, as well as some stuff about example shrinking. Mostly intended for tests of the form "Hypothesis finds an example of this condition" + assertions about which example it finds. hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/__init__.py000066400000000000000000000012751354103617500265240ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/common/000077500000000000000000000000001354103617500256765ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/common/__init__.py000066400000000000000000000060541354103617500300140ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import sys from collections import namedtuple from hypothesis.strategies import ( binary, booleans, builds, complex_numbers, decimals, dictionaries, fixed_dictionaries, floats, fractions, frozensets, integers, just, lists, none, one_of, randoms, recursive, sampled_from, sets, text, tuples, ) from tests.common.debug import TIME_INCREMENT try: import pytest except ImportError: pytest = None __all__ = ["standard_types", "OrderedPair", "TIME_INCREMENT"] OrderedPair = namedtuple("OrderedPair", ("left", "right")) ordered_pair = integers().flatmap( lambda right: integers(min_value=0).map( lambda length: OrderedPair(right - length, right) ) ) def constant_list(strat): return strat.flatmap(lambda v: lists(just(v))) ABC = namedtuple("ABC", ("a", "b", "c")) def abc(x, y, z): return builds(ABC, x, y, z) standard_types = [ lists(none(), max_size=0), tuples(), sets(none(), max_size=0), frozensets(none(), max_size=0), fixed_dictionaries({}), abc(booleans(), booleans(), booleans()), abc(booleans(), booleans(), integers()), fixed_dictionaries({"a": integers(), "b": booleans()}), dictionaries(booleans(), integers()), dictionaries(text(), booleans()), one_of(integers(), tuples(booleans())), sampled_from(range(10)), one_of(just("a"), just("b"), just("c")), sampled_from(("a", "b", "c")), integers(), integers(min_value=3), integers(min_value=(-2 ** 32), max_value=(2 ** 64)), floats(), floats(min_value=-2.0, max_value=3.0), floats(), floats(min_value=-2.0), floats(), floats(max_value=-0.0), floats(), floats(min_value=0.0), floats(min_value=3.14, max_value=3.14), text(), binary(), booleans(), tuples(booleans(), booleans()), frozensets(integers()), sets(frozensets(booleans())), complex_numbers(), fractions(), decimals(), lists(lists(booleans())), lists(floats(0.0, 0.0)), ordered_pair, constant_list(integers()), integers().filter(lambda x: abs(x) > 100), floats(min_value=-sys.float_info.max, 
max_value=sys.float_info.max), none(), randoms(), booleans().flatmap(lambda x: booleans() if x else complex_numbers()), recursive(base=booleans(), extend=lambda x: lists(x, max_size=3), max_leaves=10), ] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/common/arguments.py000066400000000000000000000026631354103617500302640ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import given from hypothesis.errors import InvalidArgument def e(a, *args, **kwargs): return (a, args, kwargs) def e_to_str(elt): f, args, kwargs = elt bits = list(map(repr, args)) bits.extend(sorted("%s=%r" % (k, v) for k, v in kwargs.items())) return "%s(%s)" % (f.__name__, ", ".join(bits)) def argument_validation_test(bad_args): @pytest.mark.parametrize( ("function", "args", "kwargs"), bad_args, ids=list(map(e_to_str, bad_args)) ) def test_raise_invalid_argument(function, args, kwargs): @given(function(*args, **kwargs)) def test(x): pass with pytest.raises(InvalidArgument): test() return test_raise_invalid_argument hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/common/costbounds.py000066400000000000000000000020471354103617500304360ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # 
# Memo table: value of n -> number of predicate calls find_integer needed.
FIND_INTEGER_COSTS = {}


def find_integer_cost(n):
    """Return how many predicate calls ``find_integer`` makes to locate ``n``.

    Results are memoised in ``FIND_INTEGER_COSTS`` so each value of ``n``
    is only measured once per process.
    """
    if n in FIND_INTEGER_COSTS:
        return FIND_INTEGER_COSTS[n]

    calls = [0]

    def probe(i):
        calls[0] += 1
        return i <= n

    find_integer(probe)
    # setdefault keeps the first recorded value if a concurrent caller won.
    return FIND_INTEGER_COSTS.setdefault(n, calls[0])
# END HEADER

from __future__ import absolute_import, division, print_function

import hypothesis.strategies as st
from hypothesis import HealthCheck, Verbosity, given, settings as Settings
from hypothesis.errors import NoSuchExample, Unsatisfiable
from hypothesis.internal.conjecture.data import ConjectureData, StopTest
from hypothesis.internal.reflection import get_pretty_function_description

from tests.common.utils import no_shrink

# Simulated seconds added to the fake clock per condition check — matches
# the monkeypatched time.time in conftest.py, which advances by this much
# per call (see consistently_increment_time).
TIME_INCREMENT = 0.01


class Timeout(BaseException):
    # BaseException (not Exception) so it cannot be accidentally swallowed
    # by broad `except Exception` handlers in the code under test.
    pass


def minimal(definition, condition=lambda x: True, settings=None, timeout_after=10):
    """Return the minimal example from ``definition`` satisfying ``condition``.

    :param definition: Hypothesis strategy to draw examples from.
    :param condition: predicate the example must satisfy (default: anything).
    :param settings: optional settings used as the parent profile.
    :param timeout_after: budget in simulated-time units; ``None`` disables
        the timeout entirely.
    :raises Unsatisfiable: if no satisfying example was found at all.
    :raises Timeout: if shrinking exceeds the simulated-time budget.
    """

    class Found(Exception):
        """Signal that the example matches condition."""

    def wrapped_condition(x):
        # The clock only starts once the first satisfying example has been
        # seen (runtime becomes non-empty), so the budget applies to the
        # shrinking phase rather than the initial search.
        if timeout_after is not None:
            if runtime:
                runtime[0] += TIME_INCREMENT
                if runtime[0] >= timeout_after:
                    raise Timeout()
        result = condition(x)
        if result and not runtime:
            runtime.append(0.0)
        return result

    @given(definition)
    @Settings(
        parent=settings or Settings(max_examples=50000, verbosity=Verbosity.quiet),
        suppress_health_check=HealthCheck.all(),
        report_multiple_bugs=False,
        derandomize=True,
        database=None,
    )
    def inner(x):
        if wrapped_condition(x):
            # Record the current (shrunk) example and abort the run; the
            # Found exception carries control back out through @given.
            result[:] = [x]
            raise Found

    definition.validate()
    runtime = []
    result = []
    try:
        inner()
    except Found:
        return result[0]
    raise Unsatisfiable(
        "Could not find any examples from %r that satisfied %s"
        % (definition, get_pretty_function_description(condition))
    )


def find_any(definition, condition=lambda _: True, settings=None):
    """Return some example from ``definition`` satisfying ``condition``,
    with shrinking disabled (any satisfying example will do)."""
    return minimal(
        definition, condition, settings=Settings(settings, phases=no_shrink)
    )


def assert_no_examples(strategy, condition=lambda _: True):
    """Assert that ``strategy`` cannot produce an example satisfying
    ``condition``."""
    try:
        result = find_any(strategy, condition)
        assert False, "Expected no results but found %r" % (result,)
    except (Unsatisfiable, NoSuchExample):
        # Failing to find an example is exactly the outcome we want.
        pass


def assert_all_examples(strategy, predicate):
    """Asserts that all examples of the given strategy match the predicate.

    :param strategy: Hypothesis strategy to check
    :param predicate: (callable) Predicate that takes example and returns bool
    """

    @given(strategy)
    def assert_examples(s):
        msg = "Found %r using strategy %s which does not match" % (s, strategy)
        assert predicate(s), msg

    assert_examples()


def assert_can_trigger_event(strategy, predicate):
    """Assert that drawing from ``strategy`` can record an event matching
    ``predicate`` on the underlying ConjectureData."""

    def test(buf):
        # Replay the raw byte buffer through the strategy and inspect the
        # events it recorded; StopTest just means the buffer ran out.
        data = ConjectureData.for_buffer(buf)
        try:
            data.draw(strategy)
        except StopTest:
            pass
        return any(predicate(e) for e in data.events)

    find_any(st.binary(), test)
# END HEADER

from __future__ import absolute_import, division, print_function

import os
from tempfile import mkdtemp
from warnings import filterwarnings

from hypothesis import Verbosity, settings
from hypothesis._settings import not_set
from hypothesis.configuration import set_hypothesis_home_dir
from hypothesis.internal.charmap import charmap, charmap_file
from hypothesis.internal.coverage import IN_COVERAGE_TESTS


def run():
    """One-time test-session setup: configure warning filters, isolate the
    Hypothesis home directory, sanity-check settings, and register the
    test profiles.  Called from conftest.py at import time.
    """
    # Escalate all warnings to errors, then carve out known-benign ones.
    # Filter order matters: later filterwarnings calls take precedence.
    filterwarnings("error")
    filterwarnings("ignore", category=ImportWarning)
    filterwarnings("ignore", category=FutureWarning, module="pandas._version")

    # Fixed in recent versions but allowed by pytest=3.0.0; see #1630
    filterwarnings("ignore", category=DeprecationWarning, module="pluggy")

    # See https://github.com/numpy/numpy/pull/432
    filterwarnings("ignore", message="numpy.dtype size changed")
    filterwarnings("ignore", message="numpy.ufunc size changed")

    # See https://github.com/HypothesisWorks/hypothesis/issues/1674
    filterwarnings(
        "ignore",
        message=(
            "The virtualenv distutils package at .+ appears to be in the "
            "same location as the system distutils?"
        ),
        category=UserWarning,
    )

    # Imported by Pandas in version 1.9, but fixed in later versions.
    filterwarnings(
        "ignore", message="Importing from numpy.testing.decorators is deprecated"
    )
    filterwarnings(
        "ignore", message="Importing from numpy.testing.nosetester is deprecated"
    )

    # Point the Hypothesis home dir (and hence the example database) at a
    # fresh temp dir so test runs cannot interfere with each other.
    new_home = mkdtemp()
    set_hypothesis_home_dir(new_home)
    assert settings.default.database.path.startswith(new_home)

    # Force the Unicode character table to be built up front.
    charmap()
    assert os.path.exists(charmap_file()), charmap_file()
    assert isinstance(settings, type)

    # We do a smoke test here before we mess around with settings.
    x = settings()

    import hypothesis._settings as settings_module

    for s in settings_module.all_settings.values():
        v = getattr(x, s.name)
        # Check if it has a dynamically defined default and if so skip
        # comparison.
        if getattr(settings, s.name).show_default:
            assert v == s.default, "%r == x.%s != s.%s == %r" % (
                v,
                s.name,
                s.name,
                s.default,
            )

    settings.register_profile(
        "default", settings(max_examples=10 if IN_COVERAGE_TESTS else not_set)
    )
    settings.register_profile("speedy", settings(max_examples=5))
    settings.register_profile("debug", settings(verbosity=Verbosity.debug))
    settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default"))
# # END HEADER from __future__ import absolute_import, division, print_function import time from hypothesis.internal.compat import hbytes, hrange from hypothesis.searchstrategy import SearchStrategy class _Slow(SearchStrategy): def do_draw(self, data): time.sleep(1.0) data.draw_bytes(2) return None SLOW = _Slow() class HardToShrink(SearchStrategy): def __init__(self): self.__last = None self.accepted = set() def do_draw(self, data): x = hbytes([data.draw_bits(8) for _ in range(100)]) if x in self.accepted: return True ls = self.__last if ls is None: if all(x): self.__last = x self.accepted.add(x) return True else: return False diffs = [i for i in hrange(len(x)) if x[i] != ls[i]] if len(diffs) == 1: i = diffs[0] if x[i] + 1 == ls[i]: self.__last = x self.accepted.add(x) return True return False hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/common/utils.py000066400000000000000000000074751354103617500274250ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# END HEADER

from __future__ import absolute_import, division, print_function

import contextlib
import sys
import traceback
from io import BytesIO, StringIO

from hypothesis._settings import Phase
from hypothesis.errors import HypothesisDeprecationWarning
from hypothesis.internal.compat import PY2
from hypothesis.internal.reflection import proxies
from hypothesis.reporting import default, with_reporter

# All phases except shrinking — pass as settings(phases=no_shrink) to run
# tests without minimisation.
no_shrink = tuple(set(Phase) - {Phase.shrink})


def flaky(max_runs, min_passes):
    """Decorator factory for known-flaky tests: rerun the test until it has
    passed ``min_passes`` times, re-raising the failure only once
    ``max_runs`` attempts have been used up."""
    assert isinstance(max_runs, int)
    assert isinstance(min_passes, int)
    assert 0 < min_passes <= max_runs <= 50  # arbitrary cap

    def accept(func):
        @proxies(func)
        def inner(*args, **kwargs):
            runs = passes = 0
            while passes < min_passes:
                runs += 1
                try:
                    func(*args, **kwargs)
                    passes += 1
                except BaseException:
                    # Only propagate the failure when out of retries.
                    if runs >= max_runs:
                        raise

        return inner

    return accept


@contextlib.contextmanager
def capture_out():
    """Redirect stdout (and Hypothesis's reporter) into an in-memory
    buffer for the duration of the block; yields the buffer."""
    old_out = sys.stdout
    try:
        # Python 2's stdout is a byte stream; Python 3's is text.
        new_out = BytesIO() if PY2 else StringIO()
        sys.stdout = new_out
        with with_reporter(default):
            yield new_out
    finally:
        sys.stdout = old_out


class ExcInfo(object):
    # Plain holder object; ``raises`` stores the caught exception on .value.
    pass


@contextlib.contextmanager
def raises(exctype):
    """Minimal stand-in for pytest.raises: assert the block raises
    ``exctype`` and expose the exception as ``.value`` on the yielded
    object."""
    e = ExcInfo()
    try:
        yield e
        assert False, "Expected to raise an exception but didn't"
    except exctype as err:
        traceback.print_exc()
        e.value = err
        return


def fails_with(e):
    """Decorator factory: the wrapped test is expected to raise ``e``."""

    def accepts(f):
        @proxies(f)
        def inverted_test(*arguments, **kwargs):
            with raises(e):
                f(*arguments, **kwargs)

        return inverted_test

    return accepts


# Shorthand for tests expected to fail with an AssertionError.
fails = fails_with(AssertionError)


class NotDeprecated(Exception):
    pass


@contextlib.contextmanager
def validate_deprecation():
    """Assert that the enclosed block emits a HypothesisDeprecationWarning."""
    import warnings

    try:
        warnings.simplefilter("always", HypothesisDeprecationWarning)
        with warnings.catch_warnings(record=True) as w:
            yield
    finally:
        # Restore the suite-wide policy of erroring on deprecations before
        # checking (and possibly raising) so the filter is always reset.
        warnings.simplefilter("error", HypothesisDeprecationWarning)
        if not any(e.category == HypothesisDeprecationWarning for e in w):
            raise NotDeprecated(
                "Expected to get a deprecation warning but got %r"
                % ([e.category for e in w],)
            )


def checks_deprecated_behaviour(func):
    """A decorator for testing deprecated behaviour."""

    @proxies(func)
    def _inner(*args, **kwargs):
        with validate_deprecation():
            return func(*args, **kwargs)

    return _inner


def all_values(db):
    # Flatten every value stored in an example database.
    return {v for vs in db.data.values() for v in vs}


def non_covering_examples(database):
    # All stored examples except those saved under coverage-related keys.
    return {
        v
        for k, vs in database.data.items()
        if not k.endswith(b".coverage")
        for v in vs
    }


def counts_calls(func):
    """A decorator that counts how many times a function was called, and
    stores that value in a ``.calls`` attribute.
    """
    assert not hasattr(func, "calls")

    @proxies(func)
    def _inner(*args, **kwargs):
        _inner.calls += 1
        return func(*args, **kwargs)

    _inner.calls = 0
    return _inner
# Skip collection of tests which require the Django test runner,
# or that don't work on the current major version of Python.
collect_ignore_glob = ["django/*", "py3/*" if sys.version_info[0] == 2 else "py2/*"]


def pytest_configure(config):
    # Register the marker so pytest's strict-markers mode does not complain.
    config.addinivalue_line("markers", "slow: pandas expects this marker to exist.")


@pytest.fixture(scope=u"function", autouse=True)
def gc_before_each_test():
    # Collect garbage before every test so allocation-sensitive tests start
    # from a clean slate.
    gc.collect()


@pytest.fixture(scope=u"function", autouse=True)
def consistently_increment_time(monkeypatch):
    """Rather than rely on real system time we monkey patch time.time so that
    it passes at a consistent rate between calls.

    The reason for this is that when these tests run on travis, performance is
    extremely variable and the VM the tests are on might go to sleep for a bit,
    introducing arbitrary delays. This can cause a number of tests to fail
    flakily.

    Replacing time with a fake version under our control avoids this problem.
    """
    # Single-element lists so the closures below can mutate the state.
    frozen = [False]

    current_time = [time_module.time()]

    def time():
        # Each observation of the clock advances it by a fixed step,
        # unless freeze() has been called.
        if not frozen[0]:
            current_time[0] += TIME_INCREMENT
        return current_time[0]

    def sleep(naptime):
        # Sleeping just advances the fake clock; no real delay occurs.
        current_time[0] += naptime

    def freeze():
        frozen[0] = True

    monkeypatch.setattr(time_module, "time", time)
    try:
        monkeypatch.setattr(time_module, "monotonic", time)
    except AttributeError:
        # time.monotonic does not exist on Python 2.
        assert sys.version_info[0] == 2
    monkeypatch.setattr(time_module, "sleep", sleep)
    # raising=False: time.freeze is our own addition, not a real attribute.
    monkeypatch.setattr(time_module, "freeze", freeze, raising=False)
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_arbitrary_data.py000066400000000000000000000060101354103617500321220ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import find, given, reporting, strategies as st from hypothesis.errors import InvalidArgument from tests.common.utils import capture_out, checks_deprecated_behaviour, raises @given(st.integers(), st.data()) def test_conditional_draw(x, data): y = data.draw(st.integers(min_value=x)) assert y >= x def test_prints_on_failure(): @given(st.data()) def test(data): x = data.draw(st.lists(st.integers(0, 10), min_size=2)) y = data.draw(st.sampled_from(x)) x.remove(y) if y in x: raise ValueError() with raises(ValueError): with capture_out() as out: with reporting.with_reporter(reporting.default): test() result = out.getvalue() assert "Draw 1: [0, 0]" in result assert "Draw 2: 0" in result def test_prints_labels_if_given_on_failure(): @given(st.data()) def test(data): x = data.draw(st.lists(st.integers(0, 10), min_size=2), label="Some numbers") y = data.draw(st.sampled_from(x), label="A number") assert y in x x.remove(y) assert y not in x with raises(AssertionError): with capture_out() as out: with reporting.with_reporter(reporting.default): test() result = out.getvalue() assert "Draw 1 (Some numbers): [0, 0]" in result assert "Draw 2 (A number): 0" in result def test_given_twice_is_same(): @given(st.data(), st.data()) def test(data1, data2): data1.draw(st.integers()) data2.draw(st.integers()) raise ValueError() with raises(ValueError): with capture_out() as out: with reporting.with_reporter(reporting.default): test() result = out.getvalue() assert "Draw 1: 0" in result assert "Draw 2: 0" in result @checks_deprecated_behaviour def test_errors_when_used_in_find(): with raises(InvalidArgument): find(st.data(), lambda x: x.draw(st.booleans())) @pytest.mark.parametrize("f", ["filter", "map", "flatmap"]) def test_errors_when_normal_strategy_functions_are_used(f): with raises(InvalidArgument): getattr(st.data(), f)(lambda x: 1) def test_errors_when_asked_for_example(): with 
raises(InvalidArgument): st.data().example() def test_nice_repr(): assert repr(st.data()) == "data()" hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_attrs_inference.py000066400000000000000000000063261354103617500323170ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import attr import pytest import hypothesis.strategies as st from hypothesis import given, infer from hypothesis.errors import ResolutionFailed try: import typing except ImportError: typing = None @attr.s class Inferrables(object): type_ = attr.ib(type=int) type_converter = attr.ib(converter=bool) validator_type = attr.ib(validator=attr.validators.instance_of(str)) validator_type_tuple = attr.ib(validator=attr.validators.instance_of((str, int))) validator_type_multiple = attr.ib( validator=[ attr.validators.instance_of(str), attr.validators.instance_of((str, int, bool)), ] ) validator_type_has_overlap = attr.ib( validator=[ attr.validators.instance_of(str), attr.validators.instance_of((str, list)), attr.validators.instance_of(object), ] ) validator_optional = attr.ib( validator=attr.validators.optional(lambda inst, atrib, val: float(val)) ) validator_in = attr.ib(validator=attr.validators.in_([1, 2, 3])) validator_in_multiple = attr.ib( validator=[attr.validators.in_(list(range(100))), attr.validators.in_([1, 
-1])] ) validator_in_multiple_strings = attr.ib( validator=[attr.validators.in_("abcd"), attr.validators.in_(["ab", "cd"])] ) if typing is not None: typing_list = attr.ib(type=typing.List[int]) typing_list_of_list = attr.ib(type=typing.List[typing.List[int]]) typing_dict = attr.ib(type=typing.Dict[str, int]) typing_union = attr.ib(type=typing.Optional[bool]) typing_union = attr.ib(type=typing.Union[str, int]) has_default = attr.ib(default=0) has_default_factory = attr.ib(default=attr.Factory(list)) has_default_factory_takes_self = attr.ib( # uninferrable but has default default=attr.Factory(lambda _: list(), takes_self=True) ) @attr.s class Required(object): a = attr.ib() @attr.s class UnhelpfulConverter(object): a = attr.ib(converter=lambda x: x) @given(st.builds(Inferrables, has_default=infer, has_default_factory=infer)) def test_attrs_inference_builds(c): pass @given(st.from_type(Inferrables)) def test_attrs_inference_from_type(c): pass @pytest.mark.parametrize("c", [Required, UnhelpfulConverter]) def test_cannot_infer(c): with pytest.raises(ResolutionFailed): st.builds(c).example() def test_cannot_infer_takes_self(): with pytest.raises(ResolutionFailed): st.builds(Inferrables, has_default_factory_takes_self=infer).example() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_cache_implementation.py000066400000000000000000000160171354103617500333120ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from random import Random import pytest import hypothesis.strategies as st from hypothesis import HealthCheck, assume, example, given, note, settings from hypothesis.internal.cache import GenericCache, LRUReusedCache class LRUCache(GenericCache): __slots__ = ("__tick",) def __init__(self, max_size): super(LRUCache, self).__init__(max_size) self.__tick = 0 def new_entry(self, key, value): return self.tick() def on_access(self, key, value, score): return self.tick() def tick(self): self.__tick += 1 return self.__tick class LFUCache(GenericCache): def new_entry(self, key, value): return 1 def on_access(self, key, value, score): return score + 1 @st.composite def write_pattern(draw, min_size=0): keys = draw(st.lists(st.integers(0, 1000), unique=True, min_size=1)) values = draw(st.lists(st.integers(), unique=True, min_size=1)) return draw( st.lists( st.tuples(st.sampled_from(keys), st.sampled_from(values)), min_size=min_size ) ) class ValueScored(GenericCache): def new_entry(self, key, value): return value class RandomCache(GenericCache): def __init__(self, max_size): super(RandomCache, self).__init__(max_size) self.random = Random(0) def new_entry(self, key, value): return self.random.random() def on_access(self, key, value, score): return self.random.random() @pytest.mark.parametrize( "implementation", [LRUCache, LFUCache, LRUReusedCache, ValueScored, RandomCache] ) @example(writes=[(0, 0), (3, 0), (1, 0), (2, 0), (2, 0), (1, 0)], size=4) @example(writes=[(0, 0)], size=1) @example(writes=[(1, 0), (2, 0), (0, -1), (1, 0)], size=3) @given(write_pattern(), st.integers(1, 10)) def test_behaves_like_a_dict_with_losses(implementation, writes, size): model = {} target = implementation(max_size=size) for k, v in writes: try: assert model[k] == target[k] except KeyError: pass model[k] = v 
target[k] = v target.check_valid() assert target[k] == v for r, s in model.items(): try: assert s == target[r] except KeyError: pass assert len(target) <= min(len(model), size) @settings(suppress_health_check=[HealthCheck.too_slow], deadline=None) @given(write_pattern(min_size=2), st.data()) def test_always_evicts_the_lowest_scoring_value(writes, data): scores = {} n_keys = len({k for k, _ in writes}) assume(n_keys > 1) size = data.draw(st.integers(1, n_keys - 1)) evicted = set() def new_score(key): scores[key] = data.draw(st.integers(0, 1000), label="scores[%r]" % (key,)) return scores[key] last_entry = [None] class Cache(GenericCache): def new_entry(self, key, value): last_entry[0] = key evicted.discard(key) assert key not in scores return new_score(key) def on_access(self, key, value, score): assert key in scores return new_score(key) def on_evict(self, key, value, score): note("Evicted %r" % (key,)) assert score == scores[key] del scores[key] if len(scores) > 1: assert score <= min(v for k, v in scores.items() if k != last_entry[0]) evicted.add(key) target = Cache(max_size=size) model = {} for k, v in writes: target[k] = v model[k] = v assert evicted assert len(evicted) + len(target) == len(model) assert len(scores) == len(target) for k, v in model.items(): try: assert target[k] == v assert k not in evicted except KeyError: assert k in evicted def test_basic_access(): cache = ValueScored(max_size=2) cache[1] = 0 cache[1] = 0 cache[0] = 1 cache[2] = 0 assert cache[2] == 0 assert cache[0] == 1 assert len(cache) == 2 def test_can_clear_a_cache(): x = ValueScored(1) x[0] = 1 assert len(x) == 1 x.clear() assert len(x) == 0 def test_max_size_cache_ignores(): x = ValueScored(0) x[0] = 1 with pytest.raises(KeyError): x[0] def test_pinning_prevents_eviction(): cache = LRUReusedCache(max_size=10) cache[20] = 1 cache.pin(20) for i in range(20): cache[i] = 0 assert cache[20] == 1 def test_unpinning_allows_eviction(): cache = LRUReusedCache(max_size=10) cache[20] = True 
cache.pin(20) for i in range(20): cache[i] = False assert 20 in cache cache.unpin(20) cache[21] = False assert 20 not in cache def test_unpins_must_match_pins(): cache = LRUReusedCache(max_size=2) cache[1] = 1 cache.pin(1) assert cache.is_pinned(1) cache.pin(1) assert cache.is_pinned(1) cache.unpin(1) assert cache.is_pinned(1) cache.unpin(1) assert not cache.is_pinned(1) def test_will_error_instead_of_evicting_pin(): cache = LRUReusedCache(max_size=1) cache[1] = 1 cache.pin(1) with pytest.raises(ValueError): cache[2] = 2 def test_will_error_for_bad_unpin(): cache = LRUReusedCache(max_size=1) cache[1] = 1 with pytest.raises(ValueError): cache.unpin(1) def test_still_inserts_if_score_is_worse(): class TC(GenericCache): def new_entry(self, key, value): return key cache = TC(1) cache[0] = 1 cache[1] = 1 assert 0 not in cache assert 1 in cache assert len(cache) == 1 def test_does_insert_if_score_is_better(): class TC(GenericCache): def new_entry(self, key, value): return value cache = TC(1) cache[0] = 1 cache[1] = 0 assert 0 not in cache assert 1 in cache assert len(cache) == 1 def test_double_pinning_does_not_increase_pin_count(): cache = LRUReusedCache(2) cache[0] = 0 cache.pin(0) cache.pin(0) cache[1] = 1 assert len(cache) == 2 def test_can_add_new_keys_after_unpinning(): cache = LRUReusedCache(1) cache[0] = 0 cache.pin(0) cache.unpin(0) cache[1] = 1 assert len(cache) == 1 assert 1 in cache def test_iterates_over_remaining_keys(): cache = LRUReusedCache(2) for i in range(3): cache[i] = "hi" assert sorted(cache) == [1, 2] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_caching.py000066400000000000000000000032211354103617500305270ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis.errors import InvalidArgument def test_no_args(): assert st.text() is st.text() def test_tuple_lengths(): assert st.tuples(st.integers()) is st.tuples(st.integers()) assert st.tuples(st.integers()) is not st.tuples(st.integers(), st.integers()) def test_values(): assert st.integers() is not st.integers(min_value=1) def test_alphabet_key(): assert st.text(alphabet="abcs") is st.text(alphabet="abcs") def test_does_not_error_on_unhashable_posarg(): st.text(["a", "b", "c"]) def test_does_not_error_on_unhashable_kwarg(): with pytest.raises(InvalidArgument): st.builds(lambda alphabet: 1, alphabet=["a", "b", "c"]).validate() def test_caches_floats_sensitively(): assert st.floats(min_value=0.0) is st.floats(min_value=0.0) assert st.floats(min_value=0.0) is not st.floats(min_value=0) assert st.floats(min_value=0.0) is not st.floats(min_value=-0.0) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_cathetus.py000066400000000000000000000102471354103617500307610ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import math import sys from sys import float_info import pytest from hypothesis import assume, given from hypothesis.internal.cathetus import cathetus from hypothesis.strategies import floats def test_cathetus_subnormal_underflow(): u = sys.float_info.min * sys.float_info.epsilon h = 5 * u a = 4 * u assert cathetus(h, a) == 3 * u def test_cathetus_simple_underflow(): a = sys.float_info.min h = a * math.sqrt(2) b = cathetus(h, a) assert b > 0, "expecting positive cathetus(%g, %g), got %g" % (h, a, b) def test_cathetus_huge_no_overflow(): h = sys.float_info.max a = h / math.sqrt(2) b = cathetus(h, a) assert not ( math.isinf(b) or math.isnan(b) ), "expecting finite cathetus(%g, %g), got %g" % (h, a, b) def test_cathetus_large_no_overflow(): h = sys.float_info.max / 3 a = h / math.sqrt(2) b = cathetus(h, a) assert not ( math.isinf(b) or math.isnan(b) ), "expecting finite cathetus(%g, %g), got %g" % (h, a, b) @pytest.mark.parametrize( "h,a", [ # NaN hypot (float(u"nan"), 3), (float(u"nan"), 0), (float(u"nan"), float(u"inf")), (float(u"nan"), float(u"nan")), # Infeasible (2, 3), (2, -3), (2, float(u"inf")), (2, float(u"nan")), # Surprisingly consistent with c99 hypot() (float(u"inf"), float(u"inf")), ], ) def test_cathetus_nan(h, a): assert math.isnan(cathetus(h, a)) @pytest.mark.parametrize( "h,a", [ (float(u"inf"), 3), (float(u"inf"), -3), (float(u"inf"), 0), (float(u"inf"), float(u"nan")), ], ) def test_cathetus_infinite(h, a): assert math.isinf(cathetus(h, a)) @pytest.mark.parametrize( "h,a,b", [(-5, 4, 3), (5, -4, 3), (-5, -4, 3), (0, 0, 0), (1, 0, 1)] ) def test_cathetus_signs(h, a, b): assert abs(cathetus(h, a) - b) <= abs(b) * float_info.epsilon @given( h=floats(0) | 
floats(min_value=1e308, allow_infinity=False), a=floats(0, allow_infinity=False) | floats(min_value=0, max_value=1e250, allow_infinity=False), ) def test_cathetus_always_leq_hypot(h, a): assume(h >= a) b = cathetus(h, a) assert 0 <= b <= h @pytest.mark.parametrize( "a,b,h", [ (3, 4, 5), (5, 12, 13), (8, 15, 17), (7, 24, 25), (20, 21, 29), (12, 35, 37), (9, 40, 41), (28, 45, 53), (11, 60, 61), (16, 63, 65), (33, 56, 65), (48, 55, 73), (13, 84, 85), (36, 77, 85), (39, 80, 89), (65, 72, 97), (20, 99, 101), (60, 91, 109), (15, 112, 113), (44, 117, 125), (88, 105, 137), (17, 144, 145), (24, 143, 145), (51, 140, 149), (85, 132, 157), (119, 120, 169), (52, 165, 173), (19, 180, 181), (57, 176, 185), (104, 153, 185), (95, 168, 193), (28, 195, 197), (84, 187, 205), (133, 156, 205), (21, 220, 221), (140, 171, 221), (60, 221, 229), (105, 208, 233), (120, 209, 241), (32, 255, 257), (23, 264, 265), (96, 247, 265), (69, 260, 269), (115, 252, 277), (160, 231, 281), (161, 240, 289), (68, 285, 293), ], ) def test_pythagorean_triples(a, b, h): assert abs(math.hypot(a, b) - h) <= abs(h) * float_info.epsilon assert abs(cathetus(h, a) - b) <= abs(b) * float_info.epsilon hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_charmap.py000066400000000000000000000125611354103617500305550ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os import sys import tempfile import unicodedata import hypothesis.internal.charmap as cm import hypothesis.strategies as st from hypothesis import assume, given from hypothesis.internal.compat import hunichr def test_charmap_contains_all_unicode(): n = 0 for vs in cm.charmap().values(): for u, v in vs: n += v - u + 1 assert n == sys.maxunicode + 1 def test_charmap_has_right_categories(): for cat, intervals in cm.charmap().items(): for u, v in intervals: for i in range(u, v + 1): real = unicodedata.category(hunichr(i)) assert real == cat, "%d is %s but reported in %s" % (i, real, cat) def assert_valid_range_list(ls): for u, v in ls: assert u <= v for i in range(len(ls) - 1): assert ls[i] <= ls[i + 1] assert ls[i][-1] < ls[i + 1][0] @given( st.sets(st.sampled_from(cm.categories())), st.sets(st.sampled_from(cm.categories())) | st.none(), ) def test_query_matches_categories(exclude, include): values = cm.query(exclude, include) assert_valid_range_list(values) for u, v in values: for i in (u, v, (u + v) // 2): cat = unicodedata.category(hunichr(i)) if include is not None: assert cat in include assert cat not in exclude @given( st.sets(st.sampled_from(cm.categories())), st.sets(st.sampled_from(cm.categories())) | st.none(), st.integers(0, sys.maxunicode), st.integers(0, sys.maxunicode), ) def test_query_matches_categories_codepoints(exclude, include, m1, m2): m1, m2 = sorted((m1, m2)) values = cm.query(exclude, include, min_codepoint=m1, max_codepoint=m2) assert_valid_range_list(values) for u, v in values: assert m1 <= u assert v <= m2 @given(st.sampled_from(cm.categories()), st.integers(0, sys.maxunicode)) def test_exclude_only_excludes_from_that_category(cat, i): c = hunichr(i) assume(unicodedata.category(c) != cat) intervals = cm.query(exclude_categories=(cat,)) assert any(a <= i <= b for a, b in intervals) def test_reload_charmap(): x = cm.charmap() assert x is cm.charmap() 
cm._charmap = None y = cm.charmap() assert x is not y assert x == y def test_recreate_charmap(): x = cm.charmap() assert x is cm.charmap() cm._charmap = None os.unlink(cm.charmap_file()) y = cm.charmap() assert x is not y assert x == y def test_union_empty(): assert cm._union_intervals([], []) == () assert cm._union_intervals([], [[1, 2]]) == ((1, 2),) assert cm._union_intervals([[1, 2]], []) == ((1, 2),) def test_union_handles_totally_overlapped_gap(): # < xx > Imagine the intervals x and y as bit strings. # | The bit at position n is set if n falls inside that interval. # = In this model _union_intervals() performs bit-wise or. assert cm._union_intervals([[2, 3]], [[1, 2], [4, 5]]) == ((1, 5),) def test_union_handles_partially_overlapped_gap(): # < x > Imagine the intervals x and y as bit strings. # | The bit at position n is set if n falls inside that interval. # = In this model _union_intervals() performs bit-wise or. assert cm._union_intervals([[3, 3]], [[1, 2], [5, 5]]) == ((1, 3), (5, 5)) def test_successive_union(): x = [] for v in cm.charmap().values(): x = cm._union_intervals(x, v) assert x == ((0, sys.maxunicode),) def test_can_handle_race_between_exist_and_create(monkeypatch): x = cm.charmap() cm._charmap = None monkeypatch.setattr(os.path, "exists", lambda p: False) y = cm.charmap() assert x is not y assert x == y def test_exception_in_write_does_not_lead_to_broken_charmap(monkeypatch): def broken(*args, **kwargs): raise ValueError() cm._charmap = None monkeypatch.setattr(os.path, "exists", lambda p: False) monkeypatch.setattr(os, "rename", broken) cm.charmap() cm.charmap() def test_regenerate_broken_charmap_file(): cm.charmap() file_loc = cm.charmap_file() with open(file_loc, "wb"): pass cm._charmap = None cm.charmap() def test_exclude_characters_are_included_in_key(): assert cm.query() != cm.query(exclude_characters="0") def test_error_writing_charmap_file_is_suppressed(monkeypatch): def broken_mkstemp(dir): raise RuntimeError() 
monkeypatch.setattr(tempfile, "mkstemp", broken_mkstemp) try: # Cache the charmap to avoid a performance hit the next time # somebody tries to use it. saved = cm._charmap cm._charmap = None os.unlink(cm.charmap_file()) cm.charmap() finally: cm._charmap = saved hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_complex_numbers.py000066400000000000000000000063351354103617500323460ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import math import sys import hypothesis.strategies as st from hypothesis import given, reject from hypothesis.strategies import complex_numbers from tests.common.debug import minimal def test_minimal(): assert minimal(complex_numbers(), lambda x: True) == 0 def test_minimal_nonzero_real(): assert minimal(complex_numbers(), lambda x: x.real != 0) == 1 def test_minimal_nonzero_imaginary(): assert minimal(complex_numbers(), lambda x: x.imag != 0) == 1j def test_minimal_quadrant1(): assert minimal(complex_numbers(), lambda x: x.imag > 0 and x.real > 0) == 1 + 1j def test_minimal_quadrant2(): assert minimal(complex_numbers(), lambda x: x.imag > 0 and x.real < 0) == -1 + 1j def test_minimal_quadrant3(): assert minimal(complex_numbers(), lambda x: x.imag < 0 and x.real < 0) == -1 - 1j def test_minimal_quadrant4(): assert minimal(complex_numbers(), lambda x: x.imag < 0 and x.real > 0) == 1 - 1j @given(st.data(), st.integers(-5, 5).map(lambda x: 10 ** x)) def test_max_magnitude_respected(data, mag): c = data.draw(complex_numbers(max_magnitude=mag)) assert abs(c) <= mag * (1 + sys.float_info.epsilon) @given(complex_numbers(max_magnitude=0)) def test_max_magnitude_zero(val): assert val == 0 @given(st.data(), st.integers(-5, 5).map(lambda x: 10 ** x)) def test_min_magnitude_respected(data, mag): c = data.draw(complex_numbers(min_magnitude=mag)) assert ( abs(c.real) >= mag or abs(c.imag) >= mag or abs(c) >= mag * (1 - sys.float_info.epsilon) ) def test_minimal_min_magnitude_zero(): assert minimal(complex_numbers(min_magnitude=0), lambda x: True) == 0 def test_minimal_min_magnitude_none(): assert minimal(complex_numbers(min_magnitude=None), lambda x: True) == 0 def test_minimal_min_magnitude_positive(): assert minimal(complex_numbers(min_magnitude=0.5), lambda x: True) in (0.5, 1) def test_minimal_minmax_magnitude(): assert minimal( complex_numbers(min_magnitude=0.5, max_magnitude=1.5), lambda x: 
True ) in (0.5, 1) @given(st.data(), st.floats(0, 10e300, allow_infinity=False, allow_nan=False)) def test_minmax_magnitude_equal(data, mag): val = data.draw(st.complex_numbers(min_magnitude=mag, max_magnitude=mag)) try: # Cap magnitude at 10e300 to avoid float overflow, and imprecision # at very large exponents (which makes math.isclose fail) assert math.isclose(abs(val), mag) except OverflowError: reject() except AttributeError: pass # Python 2.7.3 does not have math.isclose hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_composite.py000066400000000000000000000071021354103617500311370ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import assume, given from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import hrange from tests.common.debug import minimal from tests.common.utils import flaky @st.composite def badly_draw_lists(draw, m=0): length = draw(st.integers(m, m + 10)) return [draw(st.integers()) for _ in hrange(length)] def test_simplify_draws(): assert minimal(badly_draw_lists(), lambda x: len(x) >= 3) == [0] * 3 def test_can_pass_through_arguments(): assert minimal(badly_draw_lists(5), lambda x: True) == [0] * 5 assert minimal(badly_draw_lists(m=6), lambda x: True) == [0] * 6 @st.composite def draw_ordered_with_assume(draw): x = draw(st.floats()) y = draw(st.floats()) assume(x < y) return (x, y) @given(draw_ordered_with_assume()) def test_can_assume_in_draw(xy): assert xy[0] < xy[1] def test_uses_definitions_for_reprs(): assert repr(badly_draw_lists()) == "badly_draw_lists()" assert repr(badly_draw_lists(1)) == "badly_draw_lists(m=1)" assert repr(badly_draw_lists(m=1)) == "badly_draw_lists(m=1)" def test_errors_given_default_for_draw(): with pytest.raises(InvalidArgument): @st.composite def foo(x=None): pass def test_errors_given_function_of_no_arguments(): with pytest.raises(InvalidArgument): @st.composite def foo(): pass def test_errors_given_kwargs_only(): with pytest.raises(InvalidArgument): @st.composite def foo(**kwargs): pass def test_can_use_pure_args(): @st.composite def stuff(*args): return args[0](st.sampled_from(args[1:])) assert minimal(stuff(1, 2, 3, 4, 5), lambda x: True) == 1 def test_composite_of_lists(): @st.composite def f(draw): return draw(st.integers()) + draw(st.integers()) assert minimal(st.lists(f()), lambda x: len(x) >= 10) == [0] * 10 @flaky(min_passes=2, max_runs=5) def test_can_shrink_matrices_with_length_param(): @st.composite def matrix(draw): rows = draw(st.integers(1, 10)) columns = 
draw(st.integers(1, 10)) return [ [draw(st.integers(0, 10000)) for _ in range(columns)] for _ in range(rows) ] def transpose(m): return [[row[i] for row in m] for i in range(len(m[0]))] def is_square(m): return len(m) == len(m[0]) value = minimal(matrix(), lambda m: is_square(m) and transpose(m) != m) assert len(value) == 2 assert len(value[0]) == 2 assert sorted(value[0] + value[1]) == [0, 0, 0, 1] class MyList(list): pass @given(st.data(), st.lists(st.integers()).map(MyList)) def test_does_not_change_arguments(data, ls): # regression test for issue #1017 or other argument mutation @st.composite def strat(draw, arg): return arg ex = data.draw(strat(ls)) assert ex is ls hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_conjecture_choice_tree.py000066400000000000000000000045351354103617500336360ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.strategies as st from hypothesis import given from hypothesis.internal.compat import hrange from hypothesis.internal.conjecture.choicetree import ChoiceTree def exhaust(f): tree = ChoiceTree() results = [] prefix = () while not tree.exhausted: prefix = tree.step(prefix, lambda chooser: results.append(f(chooser))) return results @given(st.lists(st.integers())) def test_can_enumerate_a_shallow_set(ls): results = exhaust(lambda chooser: chooser.choose(ls)) assert sorted(results) == sorted(ls) def test_can_enumerate_a_nested_set(): @exhaust def nested(chooser): i = chooser.choose(hrange(10)) j = chooser.choose(hrange(10), condition=lambda j: j > i) return (i, j) assert sorted(nested) == [(i, j) for i in hrange(10) for j in hrange(i + 1, 10)] def test_can_enumerate_empty(): @exhaust def empty(chooser): return 1 assert empty == [1] def test_all_filtered_child(): @exhaust def all_filtered(chooser): chooser.choose(hrange(10), condition=lambda j: False) assert all_filtered == [] def test_skips_over_exhausted_children(): results = [] def f(chooser): results.append( ( chooser.choose(hrange(3), condition=lambda x: x > 0), chooser.choose(hrange(2)), ) ) tree = ChoiceTree() tree.step((1, 0), f) tree.step((1, 1), f) tree.step((0, 0), f) assert results == [(1, 0), (1, 1), (2, 0)] def test_wraps_around_to_beginning(): def f(chooser): chooser.choose(hrange(3)) tree = ChoiceTree() assert tree.step((2,), f) == () hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_conjecture_data_tree.py000066400000000000000000000242561354103617500333170ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from random import Random import pytest from hypothesis import HealthCheck, settings from hypothesis.errors import Flaky from hypothesis.internal.compat import hbytes, hrange from hypothesis.internal.conjecture.data import ConjectureData, Status, StopTest from hypothesis.internal.conjecture.datatree import DataTree from hypothesis.internal.conjecture.engine import ConjectureRunner TEST_SETTINGS = settings( max_examples=5000, database=None, suppress_health_check=HealthCheck.all() ) def runner_for(*examples): if len(examples) == 1 and isinstance(examples[0], list): examples = examples[0] def accept(tf): runner = ConjectureRunner(tf, settings=TEST_SETTINGS, random=Random(0)) runner.exit_with = lambda reason: None ran_examples = [] for e in examples: e = hbytes(e) data = runner.cached_test_function(e) ran_examples.append((e, data)) for e, d in ran_examples: rewritten, status = runner.tree.rewrite(e) assert status == d.status assert rewritten == d.buffer return runner return accept def test_can_lookup_cached_examples(): @runner_for(b"\0\0", b"\0\1") def runner(data): data.draw_bits(8) data.draw_bits(8) def test_can_lookup_cached_examples_with_forced(): @runner_for(b"\0\0", b"\0\1") def runner(data): data.write(b"\1") data.draw_bits(8) def test_can_detect_when_tree_is_exhausted(): @runner_for(b"\0", b"\1") def runner(data): data.draw_bits(1) assert runner.tree.is_exhausted def test_can_detect_when_tree_is_exhausted_variable_size(): @runner_for(b"\0", b"\1\0", b"\1\1") def runner(data): if data.draw_bits(1): data.draw_bits(1) assert 
runner.tree.is_exhausted def test_one_dead_branch(): @runner_for([[0, i] for i in range(16)] + [[i] for i in range(1, 16)]) def runner(data): i = data.draw_bits(4) if i > 0: data.mark_invalid() data.draw_bits(4) assert runner.tree.is_exhausted def test_non_dead_root(): @runner_for(b"\0\0", b"\1\0", b"\1\1") def runner(data): data.draw_bits(1) data.draw_bits(1) def test_can_reexecute_dead_examples(): @runner_for(b"\0\0", b"\0\1", b"\0\0") def runner(data): data.draw_bits(1) data.draw_bits(1) def test_novel_prefixes_are_novel(): def tf(data): for _ in range(4): data.write(b"\0") data.draw_bits(2) runner = ConjectureRunner(tf, settings=TEST_SETTINGS, random=Random(0)) for _ in range(100): prefix = runner.tree.generate_novel_prefix(runner.random) example = prefix + hbytes(8 - len(prefix)) assert runner.tree.rewrite(example)[1] is None result = runner.cached_test_function(example) assert runner.tree.rewrite(example)[0] == result.buffer def test_overruns_if_not_enough_bytes_for_block(): runner = ConjectureRunner( lambda data: data.draw_bytes(2), settings=TEST_SETTINGS, random=Random(0) ) runner.cached_test_function(b"\0\0") assert runner.tree.rewrite(b"\0")[1] == Status.OVERRUN def test_overruns_if_prefix(): runner = ConjectureRunner( lambda data: [data.draw_bits(1) for _ in range(2)], settings=TEST_SETTINGS, random=Random(0), ) runner.cached_test_function(b"\0\0") assert runner.tree.rewrite(b"\0")[1] == Status.OVERRUN def test_stores_the_tree_flat_until_needed(): @runner_for(hbytes(10)) def runner(data): for _ in hrange(10): data.draw_bits(1) data.mark_interesting() root = runner.tree.root assert len(root.bit_lengths) == 10 assert len(root.values) == 10 assert root.transition.status == Status.INTERESTING def test_split_in_the_middle(): @runner_for([0, 0, 2], [0, 1, 3]) def runner(data): data.draw_bits(1) data.draw_bits(1) data.draw_bits(4) data.mark_interesting() root = runner.tree.root assert len(root.bit_lengths) == len(root.values) == 1 assert 
list(root.transition.children[0].values) == [2] assert list(root.transition.children[1].values) == [3] def test_stores_forced_nodes(): @runner_for(hbytes(3)) def runner(data): data.draw_bits(1, forced=0) data.draw_bits(1) data.draw_bits(1, forced=0) data.mark_interesting() root = runner.tree.root assert root.forced == {0, 2} def test_correctly_relocates_forced_nodes(): @runner_for([0, 0], [1, 0]) def runner(data): data.draw_bits(1) data.draw_bits(1, forced=0) data.mark_interesting() root = runner.tree.root assert root.transition.children[1].forced == {0} assert root.transition.children[0].forced == {0} def test_can_go_from_interesting_to_valid(): tree = DataTree() data = ConjectureData.for_buffer(b"", observer=tree.new_observer()) with pytest.raises(StopTest): data.conclude_test(Status.INTERESTING) data = ConjectureData.for_buffer(b"", observer=tree.new_observer()) with pytest.raises(StopTest): data.conclude_test(Status.VALID) def test_going_from_interesting_to_invalid_is_flaky(): tree = DataTree() data = ConjectureData.for_buffer(b"", observer=tree.new_observer()) with pytest.raises(StopTest): data.conclude_test(Status.INTERESTING) data = ConjectureData.for_buffer(b"", observer=tree.new_observer()) with pytest.raises(Flaky): data.conclude_test(Status.INVALID) def test_concluding_at_prefix_is_flaky(): tree = DataTree() data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer()) data.draw_bits(1) with pytest.raises(StopTest): data.conclude_test(Status.INTERESTING) data = ConjectureData.for_buffer(b"", observer=tree.new_observer()) with pytest.raises(Flaky): data.conclude_test(Status.INVALID) def test_concluding_with_overrun_at_prefix_is_not_flaky(): tree = DataTree() data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer()) data.draw_bits(1) with pytest.raises(StopTest): data.conclude_test(Status.INTERESTING) data = ConjectureData.for_buffer(b"", observer=tree.new_observer()) with pytest.raises(StopTest): data.conclude_test(Status.OVERRUN) def 
test_changing_n_bits_is_flaky_in_prefix(): tree = DataTree() data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer()) data.draw_bits(1) with pytest.raises(StopTest): data.conclude_test(Status.INTERESTING) data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer()) with pytest.raises(Flaky): data.draw_bits(2) def test_changing_n_bits_is_flaky_in_branch(): tree = DataTree() for i in [0, 1]: data = ConjectureData.for_buffer([i], observer=tree.new_observer()) data.draw_bits(1) with pytest.raises(StopTest): data.conclude_test(Status.INTERESTING) data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer()) with pytest.raises(Flaky): data.draw_bits(2) def test_extending_past_conclusion_is_flaky(): tree = DataTree() data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer()) data.draw_bits(1) with pytest.raises(StopTest): data.conclude_test(Status.INTERESTING) data = ConjectureData.for_buffer(b"\1\0", observer=tree.new_observer()) data.draw_bits(1) with pytest.raises(Flaky): data.draw_bits(1) def test_changing_to_forced_is_flaky(): tree = DataTree() data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer()) data.draw_bits(1) with pytest.raises(StopTest): data.conclude_test(Status.INTERESTING) data = ConjectureData.for_buffer(b"\1\0", observer=tree.new_observer()) with pytest.raises(Flaky): data.draw_bits(1, forced=0) def test_changing_value_of_forced_is_flaky(): tree = DataTree() data = ConjectureData.for_buffer(b"\1", observer=tree.new_observer()) data.draw_bits(1, forced=1) with pytest.raises(StopTest): data.conclude_test(Status.INTERESTING) data = ConjectureData.for_buffer(b"\1\0", observer=tree.new_observer()) with pytest.raises(Flaky): data.draw_bits(1, forced=0) def test_does_not_truncate_if_unseen(): tree = DataTree() b = hbytes([1, 2, 3, 4]) assert tree.rewrite(b) == (b, None) def test_truncates_if_seen(): tree = DataTree() b = hbytes([1, 2, 3, 4]) data = ConjectureData.for_buffer(b, observer=tree.new_observer()) 
data.draw_bits(8) data.draw_bits(8) data.freeze() assert tree.rewrite(b) == (b[:2], Status.VALID) def test_child_becomes_exhausted_after_split(): tree = DataTree() data = ConjectureData.for_buffer([0, 0], observer=tree.new_observer()) data.draw_bits(8) data.draw_bits(8, forced=0) data.freeze() data = ConjectureData.for_buffer([1, 0], observer=tree.new_observer()) data.draw_bits(8) data.draw_bits(8) data.freeze() assert not tree.is_exhausted assert tree.root.transition.children[0].is_exhausted def test_will_generate_novel_prefix_to_avoid_exhausted_branches(): tree = DataTree() data = ConjectureData.for_buffer([1], observer=tree.new_observer()) data.draw_bits(1) data.freeze() data = ConjectureData.for_buffer([0, 1], observer=tree.new_observer()) data.draw_bits(1) data.draw_bits(8) data.freeze() prefix = list(tree.generate_novel_prefix(Random(0))) assert len(prefix) == 2 assert prefix[0] == 0 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_conjecture_engine.py000066400000000000000000001171251354103617500326320ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import re from contextlib import contextmanager from random import Random import attr import pytest import hypothesis.internal.conjecture.engine as engine_module import hypothesis.internal.conjecture.floats as flt from hypothesis import HealthCheck, Phase, Verbosity, settings from hypothesis.database import ExampleDatabase, InMemoryExampleDatabase from hypothesis.errors import FailedHealthCheck, Flaky from hypothesis.internal.compat import hbytes, hrange, int_from_bytes, int_to_bytes from hypothesis.internal.conjecture.data import ( MAX_DEPTH, ConjectureData, Overrun, Status, ) from hypothesis.internal.conjecture.engine import ( MIN_TEST_CALLS, ConjectureRunner, ExitReason, TargetSelector, ) from hypothesis.internal.conjecture.shrinker import Shrinker, block_program from hypothesis.internal.conjecture.shrinking import Float from hypothesis.internal.conjecture.utils import Sampler, calc_label_from_name from hypothesis.internal.entropy import deterministic_PRNG from tests.common.strategies import SLOW, HardToShrink from tests.common.utils import no_shrink SOME_LABEL = calc_label_from_name("some label") TEST_SETTINGS = settings( max_examples=5000, database=None, suppress_health_check=HealthCheck.all() ) def run_to_data(f): with deterministic_PRNG(): runner = ConjectureRunner(f, settings=TEST_SETTINGS) runner.run() assert runner.interesting_examples last_data, = runner.interesting_examples.values() return last_data def run_to_buffer(f): return hbytes(run_to_data(f).buffer) @contextmanager def buffer_size_limit(n): original = engine_module.BUFFER_SIZE try: engine_module.BUFFER_SIZE = n yield finally: engine_module.BUFFER_SIZE = original def test_can_index_results(): @run_to_buffer def f(data): data.draw_bytes(5) data.mark_interesting() assert f.index(0) == 0 assert f.count(0) == 5 def test_non_cloneable_intervals(): @run_to_buffer def x(data): data.draw_bytes(10) data.draw_bytes(9) 
data.mark_interesting() assert x == hbytes(19) def test_duplicate_buffers(): @run_to_buffer def x(data): t = data.draw_bytes(10) if not any(t): data.mark_invalid() s = data.draw_bytes(10) if s == t: data.mark_interesting() assert x == hbytes([0] * 9 + [1]) * 2 def test_deletable_draws(): @run_to_buffer def x(data): while True: x = data.draw_bytes(2) if x[0] == 255: data.mark_interesting() assert x == hbytes([255, 0]) def zero_dist(random, n): return hbytes(n) def test_can_load_data_from_a_corpus(): key = b"hi there" db = ExampleDatabase() value = b"=\xc3\xe4l\x81\xe1\xc2H\xc9\xfb\x1a\xb6bM\xa8\x7f" db.save(key, value) def f(data): if data.draw_bytes(len(value)) == value: data.mark_interesting() runner = ConjectureRunner(f, settings=settings(database=db), database_key=key) runner.run() last_data, = runner.interesting_examples.values() assert last_data.buffer == value assert len(list(db.fetch(key))) == 1 def slow_shrinker(): strat = HardToShrink() def accept(data): if data.draw(strat): data.mark_interesting() return accept @pytest.mark.parametrize("n", [1, 5]) def test_terminates_shrinks(n, monkeypatch): from hypothesis.internal.conjecture import engine db = InMemoryExampleDatabase() def generate_new_examples(self): def draw_bytes(data, n): return hbytes([255] * n) self.test_function(self.new_conjecture_data(draw_bytes)) monkeypatch.setattr( ConjectureRunner, "generate_new_examples", generate_new_examples ) monkeypatch.setattr(engine, "MAX_SHRINKS", n) runner = ConjectureRunner( slow_shrinker(), settings=settings(max_examples=5000, database=db), random=Random(0), database_key=b"key", ) runner.run() last_data, = runner.interesting_examples.values() assert last_data.status == Status.INTERESTING assert runner.shrinks == n in_db = set(db.data[runner.secondary_key]) assert len(in_db) == n def test_detects_flakiness(): failed_once = [False] count = [0] def tf(data): data.draw_bytes(1) count[0] += 1 if not failed_once[0]: failed_once[0] = True data.mark_interesting() runner 
= ConjectureRunner(tf) runner.run() assert runner.exit_reason == ExitReason.flaky assert count == [MIN_TEST_CALLS + 1] def test_variadic_draw(): def draw_list(data): result = [] while True: data.start_example(SOME_LABEL) d = data.draw_bytes(1)[0] & 7 if d: result.append(data.draw_bytes(d)) data.stop_example() if not d: break return result @run_to_buffer def b(data): if any(all(d) for d in draw_list(data)): data.mark_interesting() ls = draw_list(ConjectureData.for_buffer(b)) assert len(ls) == 1 assert len(ls[0]) == 1 def test_draw_to_overrun(): @run_to_buffer def x(data): d = (data.draw_bytes(1)[0] - 8) & 0xFF data.draw_bytes(128 * d) if d >= 2: data.mark_interesting() assert x == hbytes([10]) + hbytes(128 * 2) def test_can_navigate_to_a_valid_example(): def f(data): i = int_from_bytes(data.draw_bytes(2)) data.draw_bytes(i) data.mark_interesting() runner = ConjectureRunner(f, settings=settings(max_examples=5000, database=None)) with buffer_size_limit(2): runner.run() assert runner.interesting_examples def test_stops_after_max_examples_when_reading(): key = b"key" db = ExampleDatabase(":memory:") for i in range(10): db.save(key, hbytes([i])) seen = [] def f(data): seen.append(data.draw_bytes(1)) runner = ConjectureRunner( f, settings=settings(max_examples=1, database=db), database_key=key ) runner.run() assert len(seen) == 1 def test_stops_after_max_examples_when_generating(): seen = [] def f(data): seen.append(data.draw_bytes(1)) runner = ConjectureRunner(f, settings=settings(max_examples=1, database=None)) runner.run() assert len(seen) == 1 @pytest.mark.parametrize("examples", [1, 5, 20, 50]) def test_stops_after_max_examples_when_generating_more_bugs(examples): seen = [] bad = [False, False] def f(data): seen.append(data.draw_bits(32)) # Rare, potentially multi-error conditions if seen[-1] > 2 ** 31: bad[0] = True raise ValueError bad[1] = True raise Exception runner = ConjectureRunner( f, settings=settings(max_examples=examples, phases=[Phase.generate]) ) try: 
runner.run() except Exception: pass # No matter what, whether examples is larger or smalller than MAX_TEST_CALLS, # we stop looking at max_examples. (and re-run each failure for the traceback) assert len(seen) <= examples + sum(bad) def test_interleaving_engines(): children = [] @run_to_buffer def x(data): rnd = Random(data.draw_bytes(1)) def g(d2): d2.draw_bytes(1) data.mark_interesting() runner = ConjectureRunner(g, random=rnd) children.append(runner) runner.run() if runner.interesting_examples: data.mark_interesting() assert x == b"\0" for c in children: assert not c.interesting_examples def test_phases_can_disable_shrinking(): seen = set() def f(data): seen.add(hbytes(data.draw_bytes(32))) data.mark_interesting() runner = ConjectureRunner( f, settings=settings(database=None, phases=(Phase.reuse, Phase.generate)) ) runner.run() assert len(seen) == MIN_TEST_CALLS def test_erratic_draws(): n = [0] with pytest.raises(Flaky): @run_to_buffer def x(data): data.draw_bytes(n[0]) data.draw_bytes(255 - n[0]) if n[0] == 255: data.mark_interesting() else: n[0] += 1 def test_no_read_no_shrink(): count = [0] @run_to_buffer def x(data): count[0] += 1 data.mark_interesting() assert x == b"" assert count == [1] def test_one_dead_branch(): with deterministic_PRNG(): seen = set() @run_to_buffer def x(data): i = data.draw_bytes(1)[0] if i > 0: data.mark_invalid() i = data.draw_bytes(1)[0] if len(seen) < 255: seen.add(i) elif i not in seen: data.mark_interesting() def test_saves_on_interrupt(): def interrupts(data): raise KeyboardInterrupt() db = InMemoryExampleDatabase() runner = ConjectureRunner( interrupts, settings=settings(database=db), database_key=b"key" ) with pytest.raises(KeyboardInterrupt): runner.run() assert db.data def test_returns_written(): value = hbytes(b"\0\1\2\3") @run_to_buffer def written(data): data.write(value) data.mark_interesting() assert value == written def fails_health_check(label, **kwargs): def accept(f): runner = ConjectureRunner( f, 
settings=settings( max_examples=100, phases=no_shrink, database=None, **kwargs ), ) with pytest.raises(FailedHealthCheck) as e: runner.run() assert e.value.health_check == label assert not runner.interesting_examples return accept def test_fails_health_check_for_all_invalid(): @fails_health_check(HealthCheck.filter_too_much) def _(data): data.draw_bytes(2) data.mark_invalid() def test_fails_health_check_for_large_base(): @fails_health_check(HealthCheck.large_base_example) def _(data): data.draw_bytes(10 ** 6) def test_fails_health_check_for_large_non_base(): @fails_health_check(HealthCheck.data_too_large) def _(data): if data.draw_bits(8): data.draw_bytes(10 ** 6) def test_fails_health_check_for_slow_draws(): @fails_health_check(HealthCheck.too_slow) def _(data): data.draw(SLOW) @pytest.mark.parametrize("n_large", [1, 5, 8, 15]) def test_can_shrink_variable_draws(n_large): target = 128 * n_large @run_to_data def data(data): n = data.draw_bits(4) b = [data.draw_bits(8) for _ in hrange(n)] if sum(b) >= target: data.mark_interesting() x = data.buffer assert x.count(0) == 0 assert sum(x[1:]) == target @pytest.mark.parametrize("n", [1, 5, 8, 15]) def test_can_shrink_variable_draws_with_just_deletion(n, monkeypatch): @shrinking_from([n] + [0] * (n - 1) + [1]) def shrinker(data): n = data.draw_bits(4) b = [data.draw_bits(8) for _ in hrange(n)] if any(b): data.mark_interesting() shrinker.fixate_shrink_passes(["minimize_individual_blocks"]) assert list(shrinker.shrink_target.buffer) == [1, 1] def test_deletion_and_lowering_fails_to_shrink(monkeypatch): monkeypatch.setattr( Shrinker, "shrink", lambda self: self.fixate_shrink_passes(["minimize_individual_blocks"]), ) def gen(self): self.cached_test_function(10) monkeypatch.setattr(ConjectureRunner, "generate_new_examples", gen) @run_to_buffer def x(data): for _ in hrange(10): data.draw_bytes(1) data.mark_interesting() assert x == hbytes(10) def test_run_nothing(): def f(data): assert False runner = ConjectureRunner(f, 
settings=settings(phases=())) runner.run() assert runner.call_count == 0 class Foo(object): def __repr__(self): return "stuff" @pytest.mark.parametrize("event", ["hi", Foo()]) def test_note_events(event): def f(data): data.note_event(event) data.draw_bytes(1) runner = ConjectureRunner(f) runner.run() assert runner.event_call_counts[str(event)] == runner.call_count > 0 def test_debug_data(capsys): buf = [0, 1, 2] def f(data): for x in hbytes(buf): if data.draw_bits(8) != x: data.mark_invalid() data.start_example(1) data.stop_example() data.mark_interesting() runner = ConjectureRunner( f, settings=settings( max_examples=5000, database=None, suppress_health_check=HealthCheck.all(), verbosity=Verbosity.debug, ), ) runner.cached_test_function(buf) runner.run() out, _ = capsys.readouterr() assert re.match(u"\\d+ bytes \\[.*\\] -> ", out) assert "INTERESTING" in out def test_zeroes_bytes_above_bound(): def f(data): if data.draw_bits(1): x = data.draw_bytes(9) assert not any(x[4:8]) with buffer_size_limit(10): ConjectureRunner(f).run() def test_can_write_bytes_towards_the_end(): buf = b"\1\2\3" def f(data): if data.draw_bits(1): data.draw_bytes(5) data.write(hbytes(buf)) assert hbytes(data.buffer[-len(buf) :]) == buf with buffer_size_limit(10): ConjectureRunner(f).run() def test_can_increase_number_of_bytes_drawn_in_tail(): # This is designed to trigger a case where the zero bound queue will end up # increasing the size of data drawn because moving zeroes into the initial # prefix will increase the amount drawn. 
def f(data): x = data.draw_bytes(5) n = x.count(0) b = data.draw_bytes(n + 1) assert not any(b) runner = ConjectureRunner( f, settings=settings(max_examples=100, suppress_health_check=HealthCheck.all()) ) with buffer_size_limit(11): runner.run() def test_uniqueness_is_preserved_when_writing_at_beginning(): seen = set() def f(data): data.write(hbytes(1)) n = data.draw_bits(3) assert n not in seen seen.add(n) runner = ConjectureRunner(f, settings=settings(max_examples=50)) runner.run() assert runner.valid_examples == len(seen) @pytest.mark.parametrize("skip_target", [False, True]) @pytest.mark.parametrize("initial_attempt", [127, 128]) def test_clears_out_its_database_on_shrinking( initial_attempt, skip_target, monkeypatch ): def generate_new_examples(self): self.cached_test_function(initial_attempt) monkeypatch.setattr( ConjectureRunner, "generate_new_examples", generate_new_examples ) key = b"key" db = InMemoryExampleDatabase() def f(data): if data.draw_bits(8) >= 127: data.mark_interesting() runner = ConjectureRunner( f, settings=settings(database=db, max_examples=256), database_key=key, random=Random(0), ) for n in hrange(256): if n != 127 or not skip_target: db.save(runner.secondary_key, hbytes([n])) runner.run() assert len(runner.interesting_examples) == 1 for b in db.fetch(runner.secondary_key): assert b[0] >= 127 assert len(list(db.fetch(runner.database_key))) == 1 def test_can_delete_intervals(): @shrinking_from([255] * 10 + [1, 3]) def shrinker(data): while True: n = data.draw_bits(8) if n == 255: continue elif n == 1: break else: data.mark_invalid() if data.draw_bits(8) == 3: data.mark_interesting() shrinker.fixate_shrink_passes(["adaptive_example_deletion"]) x = shrinker.shrink_target assert x.buffer == hbytes([1, 3]) def test_detects_too_small_block_starts(): call_count = [0] def f(data): assert call_count[0] == 0 call_count[0] += 1 data.draw_bytes(8) data.mark_interesting() runner = ConjectureRunner(f, settings=settings(database=None)) r = 
runner.cached_test_function(hbytes(8)) assert r.status == Status.INTERESTING assert call_count[0] == 1 r2 = runner.cached_test_function(hbytes([255] * 7)) assert r2.status == Status.OVERRUN assert call_count[0] == 1 def test_shrinks_both_interesting_examples(monkeypatch): def generate_new_examples(self): self.cached_test_function(hbytes([1])) monkeypatch.setattr( ConjectureRunner, "generate_new_examples", generate_new_examples ) def f(data): n = data.draw_bits(8) data.mark_interesting(n & 1) runner = ConjectureRunner(f, database_key=b"key") runner.run() assert runner.interesting_examples[0].buffer == hbytes([0]) assert runner.interesting_examples[1].buffer == hbytes([1]) def test_duplicate_blocks_that_go_away(): @shrinking_from([1, 1, 1, 2] * 2 + [5] * 2) def shrinker(data): x = data.draw_bits(32) y = data.draw_bits(32) if x != y: data.mark_invalid() b = [data.draw_bytes(1) for _ in hrange(x & 255)] if len(set(b)) <= 1: data.mark_interesting() shrinker.fixate_shrink_passes(["minimize_duplicated_blocks"]) assert shrinker.shrink_target.buffer == hbytes([0] * 8) def test_accidental_duplication(monkeypatch): @shrinking_from([18] * 20) def shrinker(data): x = data.draw_bits(8) y = data.draw_bits(8) if x != y: data.mark_invalid() if x < 5: data.mark_invalid() b = [data.draw_bytes(1) for _ in hrange(x)] if len(set(b)) == 1: data.mark_interesting() shrinker.fixate_shrink_passes(["minimize_duplicated_blocks"]) assert list(shrinker.buffer) == [5] * 7 def test_discarding(monkeypatch): monkeypatch.setattr(Shrinker, "shrink", Shrinker.remove_discarded) monkeypatch.setattr( ConjectureRunner, "generate_new_examples", lambda runner: runner.cached_test_function(hbytes([0, 1] * 10)), ) @run_to_buffer def x(data): count = 0 while count < 10: data.start_example(SOME_LABEL) b = data.draw_bits(1) if b: count += 1 data.stop_example(discard=not b) data.mark_interesting() assert x == hbytes(hbytes([1]) * 10) def test_can_remove_discarded_data(): @shrinking_from(hbytes([0] * 10) + 
hbytes([11])) def shrinker(data): while True: data.start_example(SOME_LABEL) b = data.draw_bits(8) data.stop_example(discard=(b == 0)) if b == 11: break data.mark_interesting() shrinker.remove_discarded() assert list(shrinker.buffer) == [11] def test_discarding_iterates_to_fixed_point(): @shrinking_from(hbytes([1] * 10) + hbytes([0])) def shrinker(data): data.start_example(0) data.draw_bits(1) data.stop_example(discard=True) while data.draw_bits(1): pass data.mark_interesting() shrinker.remove_discarded() assert list(shrinker.buffer) == [1, 0] def test_discarding_is_not_fooled_by_empty_discards(): @shrinking_from(hbytes([1, 1])) def shrinker(data): data.draw_bits(1) data.start_example(0) data.stop_example(discard=True) data.draw_bits(1) data.mark_interesting() shrinker.remove_discarded() assert shrinker.shrink_target.has_discards def test_discarding_can_fail(monkeypatch): @shrinking_from(hbytes([1])) def shrinker(data): data.start_example(0) data.draw_bits(1) data.stop_example(discard=True) data.mark_interesting() shrinker.remove_discarded() assert any(e.discarded and e.length > 0 for e in shrinker.shrink_target.examples) def test_depth_bounds_in_generation(): depth = [0] def tails(data, n): depth[0] = max(depth[0], n) if data.draw_bits(8): data.start_example(SOME_LABEL) tails(data, n + 1) data.stop_example() def f(data): tails(data, 0) runner = ConjectureRunner(f, settings=settings(database=None, max_examples=20)) runner.run() assert 0 < depth[0] <= MAX_DEPTH def test_shrinking_from_mostly_zero(monkeypatch): monkeypatch.setattr( ConjectureRunner, "generate_new_examples", lambda self: self.cached_test_function(hbytes(5) + hbytes([2])), ) @run_to_buffer def x(data): s = [data.draw_bits(8) for _ in hrange(6)] if any(s): data.mark_interesting() assert x == hbytes(5) + hbytes([1]) def test_handles_nesting_of_discard_correctly(monkeypatch): monkeypatch.setattr(Shrinker, "shrink", Shrinker.remove_discarded) monkeypatch.setattr( ConjectureRunner, "generate_new_examples", 
lambda runner: runner.cached_test_function(hbytes([0, 0, 1, 1])), ) @run_to_buffer def x(data): while True: data.start_example(SOME_LABEL) succeeded = data.draw_bits(1) data.start_example(SOME_LABEL) data.draw_bits(1) data.stop_example(discard=not succeeded) data.stop_example(discard=not succeeded) if succeeded: data.mark_interesting() assert x == hbytes([1, 1]) def test_can_zero_subintervals(monkeypatch): @shrinking_from(hbytes([3, 0, 0, 0, 1]) * 10) def shrinker(data): for _ in hrange(10): data.start_example(SOME_LABEL) n = data.draw_bits(8) data.draw_bytes(n) data.stop_example() if data.draw_bits(8) != 1: return data.mark_interesting() shrinker.fixate_shrink_passes(["zero_examples"]) assert list(shrinker.buffer) == [0, 1] * 10 def test_can_pass_to_an_indirect_descendant(monkeypatch): def tree(data): data.start_example(1) n = data.draw_bits(1) label = data.draw_bits(8) if n: tree(data) tree(data) data.stop_example(1) return label initial = hbytes([1, 10, 0, 0, 1, 0, 0, 10, 0, 0]) target = hbytes([0, 10]) good = {initial, target} @shrinking_from(initial) def shrinker(data): tree(data) if hbytes(data.buffer) in good: data.mark_interesting() shrinker.fixate_shrink_passes(["pass_to_descendant"]) assert shrinker.shrink_target.buffer == target def shrink(buffer, *passes): def accept(f): shrinker = shrinking_from(buffer)(f) shrinker.fixate_shrink_passes(passes) return list(shrinker.buffer) return accept def test_shrinking_blocks_from_common_offset(): @shrinking_from([11, 10]) def shrinker(data): m = data.draw_bits(8) n = data.draw_bits(8) if abs(m - n) <= 1 and max(m, n) > 0: data.mark_interesting() shrinker.mark_changed(0) shrinker.mark_changed(1) shrinker.lower_common_block_offset() x = shrinker.shrink_target.buffer assert sorted(x) == [0, 1] def test_handle_empty_draws(monkeypatch): monkeypatch.setattr( Shrinker, "shrink", lambda self: self.fixate_shrink_passes(["adaptive_example_deletion"]), ) @run_to_buffer def x(data): while True: data.start_example(SOME_LABEL) n 
= data.draw_bits(1) data.start_example(SOME_LABEL) data.stop_example() data.stop_example(discard=n > 0) if not n: break data.mark_interesting() assert x == hbytes([0]) def test_large_initial_write(): big = hbytes(b"\xff") * 512 def f(data): data.write(big) data.draw_bits(63) with deterministic_PRNG(): runner = ConjectureRunner( f, settings=settings( max_examples=5000, database=None, suppress_health_check=HealthCheck.all(), ), ) with buffer_size_limit(1024): runner.run() assert runner.exit_reason == ExitReason.finished def test_can_reorder_examples(): @shrinking_from([1, 0, 1, 1, 0, 1, 0, 0, 0]) def shrinker(data): total = 0 for _ in range(5): data.start_example(0) if data.draw_bits(8): total += data.draw_bits(9) data.stop_example(0) if total == 2: data.mark_interesting() shrinker.fixate_shrink_passes(["reorder_examples"]) assert list(shrinker.buffer) == [0, 0, 0, 1, 0, 1, 1, 0, 1] def test_permits_but_ignores_raising_order(monkeypatch): monkeypatch.setattr( ConjectureRunner, "generate_new_examples", lambda runner: runner.cached_test_function([1]), ) monkeypatch.setattr( Shrinker, "shrink", lambda self: self.incorporate_new_buffer(hbytes([2])) ) @run_to_buffer def x(data): data.draw_bits(2) data.mark_interesting() assert list(x) == [1] def test_block_deletion_can_delete_short_ranges(monkeypatch): @shrinking_from([v for i in range(5) for _ in range(i + 1) for v in [0, i]]) def shrinker(data): while True: n = data.draw_bits(16) for _ in range(n): if data.draw_bits(16) != n: data.mark_invalid() if n == 4: data.mark_interesting() shrinker.fixate_shrink_passes([block_program("X" * i) for i in range(1, 5)]) assert list(shrinker.shrink_target.buffer) == [0, 4] * 5 def test_try_shrinking_blocks_ignores_overrun_blocks(monkeypatch): monkeypatch.setattr( ConjectureRunner, "generate_new_examples", lambda runner: runner.cached_test_function([3, 3, 0, 1]), ) monkeypatch.setattr( Shrinker, "shrink", lambda self: self.try_shrinking_blocks((0, 1, 5), hbytes([2])), ) @run_to_buffer 
def x(data): n1 = data.draw_bits(8) data.draw_bits(8) if n1 == 3: data.draw_bits(8) k = data.draw_bits(8) if k == 1: data.mark_interesting() assert list(x) == [2, 2, 1] def shrinking_from(start): def accept(f): with deterministic_PRNG(): runner = ConjectureRunner( f, settings=settings( max_examples=5000, database=None, suppress_health_check=HealthCheck.all(), ), ) runner.cached_test_function(start) assert runner.interesting_examples last_data, = runner.interesting_examples.values() return runner.new_shrinker( last_data, lambda d: d.status == Status.INTERESTING ) return accept def test_dependent_block_pairs_is_up_to_shrinking_integers(): # Unit test extracted from a failure in tests/nocover/test_integers.py distribution = Sampler([4.0, 8.0, 1.0, 1.0, 0.5]) sizes = [8, 16, 32, 64, 128] @shrinking_from(b"\x03\x01\x00\x00\x00\x00\x00\x01\x00\x02\x01") def shrinker(data): size = sizes[distribution.sample(data)] result = data.draw_bits(size) sign = (-1) ** (result & 1) result = (result >> 1) * sign cap = data.draw_bits(8) if result >= 32768 and cap == 1: data.mark_interesting() shrinker.fixate_shrink_passes(["minimize_individual_blocks"]) assert list(shrinker.shrink_target.buffer) == [1, 1, 0, 1, 0, 0, 1] def test_finding_a_minimal_balanced_binary_tree(): # Tests iteration while the shape of the thing being iterated over can # change. In particular the current example can go from trivial to non # trivial. def tree(data): # Returns height of a binary tree and whether it is height balanced. 
data.start_example("tree") n = data.draw_bits(1) if n == 0: result = (1, True) else: h1, b1 = tree(data) h2, b2 = tree(data) result = (1 + max(h1, h2), b1 and b2 and abs(h1 - h2) <= 1) data.stop_example("tree") return result # Starting from an unbalanced tree of depth six @shrinking_from([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]) def shrinker(data): _, b = tree(data) if not b: data.mark_interesting() shrinker.fixate_shrink_passes(["adaptive_example_deletion", "reorder_examples"]) assert list(shrinker.shrink_target.buffer) == [1, 0, 1, 0, 1, 0, 0] def test_database_clears_secondary_key(): key = b"key" database = InMemoryExampleDatabase() def f(data): if data.draw_bits(8) == 10: data.mark_interesting() else: data.mark_invalid() runner = ConjectureRunner( f, settings=settings( max_examples=1, database=database, suppress_health_check=HealthCheck.all() ), database_key=key, ) for i in range(10): database.save(runner.secondary_key, hbytes([i])) runner.cached_test_function([10]) assert runner.interesting_examples assert len(set(database.fetch(key))) == 1 assert len(set(database.fetch(runner.secondary_key))) == 10 runner.clear_secondary_key() assert len(set(database.fetch(key))) == 1 assert len(set(database.fetch(runner.secondary_key))) == 0 def test_database_uses_values_from_secondary_key(): key = b"key" database = InMemoryExampleDatabase() def f(data): if data.draw_bits(8) >= 5: data.mark_interesting() else: data.mark_invalid() runner = ConjectureRunner( f, settings=settings( max_examples=1, database=database, suppress_health_check=HealthCheck.all() ), database_key=key, ) for i in range(10): database.save(runner.secondary_key, hbytes([i])) runner.cached_test_function([10]) assert runner.interesting_examples assert len(set(database.fetch(key))) == 1 assert len(set(database.fetch(runner.secondary_key))) == 10 runner.clear_secondary_key() assert len(set(database.fetch(key))) == 1 assert set(map(int_from_bytes, database.fetch(runner.secondary_key))) == set( range(6, 11) ) v, = 
runner.interesting_examples.values() assert list(v.buffer) == [5] def test_exit_because_max_iterations(): def f(data): data.draw_bits(64) data.mark_invalid() runner = ConjectureRunner( f, settings=settings( max_examples=1, database=None, suppress_health_check=HealthCheck.all() ), ) runner.run() assert runner.call_count <= 1000 assert runner.exit_reason == ExitReason.max_iterations def test_dependent_block_pairs_can_lower_to_zero(): @shrinking_from([1, 0, 1]) def shrinker(data): if data.draw_bits(1): n = data.draw_bits(16) else: n = data.draw_bits(8) if n == 1: data.mark_interesting() shrinker.fixate_shrink_passes(["minimize_individual_blocks"]) assert list(shrinker.shrink_target.buffer) == [0, 1] def test_handle_size_too_large_during_dependent_lowering(): @shrinking_from([1, 255, 0]) def shrinker(data): if data.draw_bits(1): data.draw_bits(16) data.mark_interesting() else: data.draw_bits(8) shrinker.fixate_shrink_passes(["minimize_individual_blocks"]) def test_zero_examples_will_zero_blocks(): @shrinking_from([1, 1, 1]) def shrinker(data): n = data.draw_bits(1) data.draw_bits(1) m = data.draw_bits(1) if n == m == 1: data.mark_interesting() shrinker.fixate_shrink_passes(["zero_examples"]) assert list(shrinker.shrink_target.buffer) == [1, 0, 1] def test_block_may_grow_during_lexical_shrinking(): initial = hbytes([2, 1, 1]) @shrinking_from(initial) def shrinker(data): n = data.draw_bits(8) if n == 2: data.draw_bits(8) data.draw_bits(8) else: data.draw_bits(16) data.mark_interesting() shrinker.fixate_shrink_passes(["minimize_individual_blocks"]) assert list(shrinker.shrink_target.buffer) == [0, 0, 0] def test_lower_common_block_offset_does_nothing_when_changed_blocks_are_zero(): @shrinking_from([1, 0, 1, 0]) def shrinker(data): data.draw_bits(1) data.draw_bits(1) data.draw_bits(1) data.draw_bits(1) data.mark_interesting() shrinker.mark_changed(1) shrinker.mark_changed(3) shrinker.lower_common_block_offset() assert list(shrinker.shrink_target.buffer) == [1, 0, 1, 0] def 
test_lower_common_block_offset_ignores_zeros(): @shrinking_from([2, 2, 0]) def shrinker(data): n = data.draw_bits(8) data.draw_bits(8) data.draw_bits(8) if n > 0: data.mark_interesting() for i in range(3): shrinker.mark_changed(i) shrinker.lower_common_block_offset() assert list(shrinker.shrink_target.buffer) == [1, 1, 0] def test_pandas_hack(): @shrinking_from([2, 1, 1, 7]) def shrinker(data): n = data.draw_bits(8) m = data.draw_bits(8) if n == 1: if m == 7: data.mark_interesting() data.draw_bits(8) if data.draw_bits(8) == 7: data.mark_interesting() shrinker.fixate_shrink_passes([block_program("-XX")]) assert list(shrinker.shrink_target.buffer) == [1, 7] def test_alphabet_minimization(): @shrink(hbytes((10, 11)) * 5, "alphabet_minimize") def x(data): buf = data.draw_bytes(10) if len(set(buf)) > 2: data.mark_invalid() if buf[0] < buf[1] and buf[1] > 1: data.mark_interesting() assert x == [0, 2] * 5 fake_data_counter = 0 @attr.s() class FakeData(object): status = attr.ib(default=Status.VALID) global_identifer = attr.ib(init=False) def __attrs_post_init__(self): global fake_data_counter fake_data_counter += 1 self.global_identifier = fake_data_counter def test_target_selector_will_maintain_a_bounded_pool(): selector = TargetSelector(random=Random(0), pool_size=3) for i in range(100): selector.add(FakeData()) assert len(selector) == min(i + 1, 3) def test_target_selector_will_use_novel_examples_preferentially(): selector = TargetSelector(random=Random(0), pool_size=3) seen = set() for i in range(100): selector.add(FakeData()) assert len(selector) == min(i + 1, 3) t = selector.select().global_identifier assert t not in seen seen.add(t) def test_target_selector_will_eventually_reuse_examples(): selector = TargetSelector(random=Random(0), pool_size=2) seen = set() selector.add(FakeData()) selector.add(FakeData()) for _ in range(2): x = selector.select() assert x.global_identifier not in seen seen.add(x.global_identifier) for _ in range(2): x = selector.select() assert 
x.global_identifier in seen def test_cached_test_function_returns_right_value(): count = [0] def tf(data): count[0] += 1 data.draw_bits(2) data.mark_interesting() with deterministic_PRNG(): runner = ConjectureRunner(tf, settings=TEST_SETTINGS) for _ in hrange(2): for b in (b"\0", b"\1"): d = runner.cached_test_function(b) assert d.status == Status.INTERESTING assert d.buffer == b assert count[0] == 2 def test_cached_test_function_does_not_reinvoke_on_prefix(): call_count = [0] def test_function(data): call_count[0] += 1 data.draw_bits(8) data.write(hbytes([7])) data.draw_bits(8) with deterministic_PRNG(): runner = ConjectureRunner(test_function, settings=TEST_SETTINGS) data = runner.cached_test_function(hbytes(3)) assert data.status == Status.VALID for n in [2, 1, 0]: prefix_data = runner.cached_test_function(hbytes(n)) assert prefix_data is Overrun assert call_count[0] == 1 def test_float_shrink_can_run_when_canonicalisation_does_not_work(monkeypatch): # This should be an error when called monkeypatch.setattr(Float, "shrink", None) base_buf = int_to_bytes(flt.base_float_to_lex(1000.0), 8) + hbytes(1) @shrinking_from(base_buf) def shrinker(data): flt.draw_float(data) if hbytes(data.buffer) == base_buf: data.mark_interesting() shrinker.fixate_shrink_passes(["minimize_floats"]) assert shrinker.shrink_target.buffer == base_buf def test_will_evict_entries_from_the_cache(monkeypatch): monkeypatch.setattr(engine_module, "CACHE_SIZE", 5) count = [0] def tf(data): data.draw_bytes(1) count[0] += 1 runner = ConjectureRunner(tf, settings=TEST_SETTINGS) for _ in range(3): for n in range(10): runner.cached_test_function([n]) # Because we exceeded the cache size, our previous # calls will have been evicted, so each call to # cached_test_function will have to reexecute. 
assert count[0] == 30 def test_try_shrinking_blocks_out_of_bounds(): @shrinking_from(hbytes([1])) def shrinker(data): data.draw_bits(1) data.mark_interesting() assert not shrinker.try_shrinking_blocks((1,), hbytes([1])) def test_block_programs_are_adaptive(): @shrinking_from(hbytes(1000) + hbytes([1])) def shrinker(data): while not data.draw_bits(1): pass data.mark_interesting() p = shrinker.add_new_pass(block_program("X")) shrinker.fixate_shrink_passes([p.name]) assert len(shrinker.shrink_target.buffer) == 1 assert shrinker.calls <= 60 def test_zero_examples_is_adaptive(): @shrinking_from(hbytes([1]) * 1001) def shrinker(data): for _ in hrange(1000): data.draw_bits(1) if data.draw_bits(1): data.mark_interesting() shrinker.fixate_shrink_passes(["zero_examples"]) assert shrinker.shrink_target.buffer == hbytes(1000) + hbytes([1]) assert shrinker.calls <= 60 def test_branch_ending_in_write(): seen = set() def tf(data): count = 0 while data.draw_bits(1): count += 1 if count > 1: data.draw_bits(1, forced=0) b = hbytes(data.buffer) assert b not in seen seen.add(b) with deterministic_PRNG(): runner = ConjectureRunner(tf, settings=TEST_SETTINGS) for _ in hrange(100): prefix = runner.generate_novel_prefix() attempt = prefix + hbytes(2) data = runner.cached_test_function(attempt) assert data.status == Status.VALID assert attempt.startswith(data.buffer) def test_exhaust_space(): with deterministic_PRNG(): runner = ConjectureRunner( lambda data: data.draw_bits(1), settings=TEST_SETTINGS ) runner.run() assert runner.tree.is_exhausted assert runner.valid_examples == 2 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_conjecture_float_encoding.py000066400000000000000000000150271354103617500343360ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import sys import pytest import hypothesis.internal.conjecture.floats as flt from hypothesis import assume, example, given, strategies as st from hypothesis.internal.compat import ceil, floor, hbytes, int_from_bytes, int_to_bytes from hypothesis.internal.conjecture.data import ConjectureData from hypothesis.internal.conjecture.engine import ConjectureRunner from hypothesis.internal.floats import float_to_int EXPONENTS = list(range(0, flt.MAX_EXPONENT + 1)) assert len(EXPONENTS) == 2 ** 11 def assert_reordered_exponents(res): res = list(res) assert len(res) == len(EXPONENTS) for x in res: assert res.count(x) == 1 assert 0 <= x <= flt.MAX_EXPONENT def test_encode_permutes_elements(): assert_reordered_exponents(map(flt.encode_exponent, EXPONENTS)) def test_decode_permutes_elements(): assert_reordered_exponents(map(flt.decode_exponent, EXPONENTS)) def test_decode_encode(): for e in EXPONENTS: assert flt.decode_exponent(flt.encode_exponent(e)) == e def test_encode_decode(): for e in EXPONENTS: assert flt.decode_exponent(flt.encode_exponent(e)) == e @given(st.data()) def test_double_reverse_bounded(data): n = data.draw(st.integers(1, 64)) i = data.draw(st.integers(0, 2 ** n - 1)) j = flt.reverse_bits(i, n) assert flt.reverse_bits(j, n) == i @given(st.integers(0, 2 ** 64 - 1)) def test_double_reverse(i): j = flt.reverse64(i) assert flt.reverse64(j) == i @example(1.25) @example(1.0) @given(st.floats()) def test_draw_write_round_trip(f): d = 
ConjectureData.for_buffer(hbytes(10)) flt.write_float(d, f) d2 = ConjectureData.for_buffer(d.buffer) g = flt.draw_float(d2) if f == f: assert f == g assert float_to_int(f) == float_to_int(g) d3 = ConjectureData.for_buffer(d2.buffer) flt.draw_float(d3) assert d3.buffer == d2.buffer @example(0.0) @example(2.5) @example(8.000000000000007) @example(3.0) @example(2.0) @example(1.9999999999999998) @example(1.0) @given(st.floats(min_value=0.0)) def test_floats_round_trip(f): i = flt.float_to_lex(f) g = flt.lex_to_float(i) assert float_to_int(f) == float_to_int(g) @example(1, 0.5) @given(st.integers(1, 2 ** 53), st.floats(0, 1).filter(lambda x: x not in (0, 1))) def test_floats_order_worse_than_their_integral_part(n, g): f = n + g assume(int(f) != f) assume(int(f) != 0) i = flt.float_to_lex(f) if f < 0: g = ceil(f) else: g = floor(f) assert flt.float_to_lex(float(g)) < i integral_floats = st.floats(allow_infinity=False, allow_nan=False, min_value=0.0).map( lambda x: abs(float(int(x))) ) @given(integral_floats, integral_floats) def test_integral_floats_order_as_integers(x, y): assume(x != y) x, y = sorted((x, y)) assert flt.float_to_lex(x) < flt.float_to_lex(y) @given(st.floats(0, 1)) def test_fractional_floats_are_worse_than_one(f): assume(0 < f < 1) assert flt.float_to_lex(f) > flt.float_to_lex(1) def test_reverse_bits_table_reverses_bits(): def bits(x): result = [] for _ in range(8): result.append(x & 1) x >>= 1 result.reverse() return result for i, b in enumerate(flt.REVERSE_BITS_TABLE): assert bits(i) == list(reversed(bits(b))) def test_reverse_bits_table_has_right_elements(): assert sorted(flt.REVERSE_BITS_TABLE) == list(range(256)) def float_runner(start, condition): def parse_buf(b): return flt.lex_to_float(int_from_bytes(b)) def test_function(data): f = flt.draw_float(data) if condition(f): data.mark_interesting() runner = ConjectureRunner(test_function) runner.cached_test_function(int_to_bytes(flt.float_to_lex(start), 8) + hbytes(1)) assert 
runner.interesting_examples return runner def minimal_from(start, condition): runner = float_runner(start, condition) runner.shrink_interesting_examples() v, = runner.interesting_examples.values() result = flt.draw_float(ConjectureData.for_buffer(v.buffer)) assert condition(result) return result INTERESTING_FLOATS = [0.0, 1.0, 2.0, sys.float_info.max, float("inf"), float("nan")] @pytest.mark.parametrize( ("start", "end"), [ (a, b) for a in INTERESTING_FLOATS for b in INTERESTING_FLOATS if flt.float_to_lex(a) > flt.float_to_lex(b) ], ) def test_can_shrink_downwards(start, end): assert minimal_from(start, lambda x: not (x < end)) == end @pytest.mark.parametrize( "f", [1, 2, 4, 8, 10, 16, 32, 64, 100, 128, 256, 500, 512, 1000, 1024] ) @pytest.mark.parametrize("mul", [1.1, 1.5, 9.99, 10]) def test_shrinks_downwards_to_integers(f, mul): g = minimal_from(f * mul, lambda x: x >= f) assert g == f def test_shrink_to_integer_upper_bound(): assert minimal_from(1.1, lambda x: 1 < x <= 2) == 2 def test_shrink_up_to_one(): assert minimal_from(0.5, lambda x: 0.5 <= x <= 1.5) == 1 def test_shrink_down_to_half(): assert minimal_from(0.75, lambda x: 0 < x < 1) == 0.5 def test_does_not_shrink_across_one(): # This is something of an odd special case. Because of our encoding we # prefer all numbers >= 1 to all numbers in 0 < x < 1. For the most part # this is the correct thing to do, but there are some low negative exponent # cases where we get odd behaviour like this. # This test primarily exists to validate that we don't try to subtract one # from the starting point and trigger an internal exception. 
assert minimal_from(1.1, lambda x: x == 1.1 or 0 < x < 1) == 1.1 @pytest.mark.parametrize("f", [2.0, 10000000.0]) def test_converts_floats_to_integer_form(f): assert flt.is_simple(f) buf = int_to_bytes(flt.base_float_to_lex(f), 8) runner = float_runner(f, lambda g: g == f) runner.shrink_interesting_examples() v, = runner.interesting_examples.values() assert v.buffer[:-1] < buf hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_conjecture_intlist.py000066400000000000000000000033031354103617500330430ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import assume, given from hypothesis.internal.compat import PY2 from hypothesis.internal.conjecture.junkdrawer import IntList non_neg_lists = st.lists(st.integers(min_value=0, max_value=2 ** 63 - 1)) @given(non_neg_lists) def test_intlist_is_equal_to_itself(ls): assert IntList(ls) == IntList(ls) @given(non_neg_lists, non_neg_lists) def test_distinct_int_lists_are_not_equal(x, y): assume(x != y) assert IntList(x) != IntList(y) def test_basic_equality(): x = IntList([1, 2, 3]) assert x == x t = x != x assert not t assert x != "foo" s = x == "foo" assert not s @pytest.mark.skipif( PY2, reason="The Python 2 list fallback handles this and we don't really care enough to validate it there.", ) def test_error_on_invalid_value(): with pytest.raises(ValueError): IntList([-1]) def test_extend_by_too_large(): x = IntList() ls = [1, 10 ** 6] x.extend(ls) assert list(x) == ls hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_conjecture_junkdrawer.py000066400000000000000000000026051354103617500335350ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis.internal.conjecture.junkdrawer import LazySequenceCopy def test_out_of_range(): x = LazySequenceCopy([1, 2, 3]) with pytest.raises(IndexError): x[3] with pytest.raises(IndexError): x[-4] def test_pass_through(): x = LazySequenceCopy([1, 2, 3]) assert x[0] == 1 assert x[1] == 2 assert x[2] == 3 def test_can_assign_without_changing_underlying(): underlying = [1, 2, 3] x = LazySequenceCopy(underlying) x[1] = 10 assert x[1] == 10 assert underlying[1] == 2 def test_pop(): x = LazySequenceCopy([2, 3]) assert x.pop() == 3 assert x.pop() == 2 with pytest.raises(IndexError): x.pop() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_conjecture_minimizer.py000066400000000000000000000045031354103617500333630ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from collections import Counter from random import Random from hypothesis.internal.compat import hbytes from hypothesis.internal.conjecture.shrinking import Lexical def test_shrink_to_zero(): assert Lexical.shrink( hbytes([255] * 8), lambda x: True, random=Random(0) ) == hbytes(8) def test_shrink_to_smallest(): assert Lexical.shrink( hbytes([255] * 8), lambda x: sum(x) > 10, random=Random(0) ) == hbytes([0] * 7 + [11]) def test_float_hack_fails(): assert Lexical.shrink( hbytes([255] * 8), lambda x: x[0] >> 7, random=Random(0) ) == hbytes([128] + [0] * 7) def test_can_sort_bytes_by_reordering(): start = hbytes([5, 4, 3, 2, 1, 0]) finish = Lexical.shrink(start, lambda x: set(x) == set(start), random=Random(0)) assert finish == hbytes([0, 1, 2, 3, 4, 5]) def test_can_sort_bytes_by_reordering_partially(): start = hbytes([5, 4, 3, 2, 1, 0]) finish = Lexical.shrink( start, lambda x: set(x) == set(start) and x[0] > x[-1], random=Random(0) ) assert finish == hbytes([1, 2, 3, 4, 5, 0]) def test_can_sort_bytes_by_reordering_partially2(): start = hbytes([5, 4, 3, 2, 1, 0]) finish = Lexical.shrink( start, lambda x: Counter(x) == Counter(start) and x[0] > x[2], random=Random(0), full=True, ) assert finish <= hbytes([1, 2, 0, 3, 4, 5]) def test_can_sort_bytes_by_reordering_partially_not_cross_stationary_element(): start = hbytes([5, 3, 0, 2, 1, 4]) finish = Lexical.shrink( start, lambda x: set(x) == set(start) and x[3] == 2, random=Random(0) ) assert finish <= hbytes([0, 3, 5, 2, 1, 4]) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_conjecture_order_shrinking.py000066400000000000000000000043601354103617500345500ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from random import Random import hypothesis.strategies as st from hypothesis import example, given from hypothesis.internal.compat import hrange from hypothesis.internal.conjecture.shrinking import Ordering @example([0, 1, 1, 1, 1, 1, 1, 0]) @example([0, 0]) @example([0, 1, -1]) @given(st.lists(st.integers())) def test_shrinks_down_to_sorted_the_slow_way(ls): # We normally would short-circuit and find that we can sort this # automatically, but here we test that a single run_step could put the # list in sorted order anyway if it had to, and that that is just an # optimisation. 
shrinker = Ordering(ls, lambda ls: True, random=Random(0), full=False) shrinker.run_step() assert list(shrinker.current) == sorted(ls) def test_can_partially_sort_a_list(): finish = Ordering.shrink( [5, 4, 3, 2, 1, 0], lambda x: x[0] > x[-1], random=Random(0) ) assert finish == (1, 2, 3, 4, 5, 0) def test_can_partially_sort_a_list_2(): finish = Ordering.shrink( [5, 4, 3, 2, 1, 0], lambda x: x[0] > x[2], random=Random(0), full=True ) assert finish <= (1, 2, 0, 3, 4, 5) def test_adaptively_shrinks_around_hole(): initial = list(hrange(1000, 0, -1)) initial[500] = 2000 intended_result = sorted(initial) intended_result.insert(500, intended_result.pop()) shrinker = Ordering( initial, lambda ls: ls[500] == 2000, random=Random(0), full=True ) shrinker.run() assert shrinker.current[500] == 2000 assert list(shrinker.current) == intended_result assert shrinker.calls <= 60 test_conjecture_shrinking_interface.py000066400000000000000000000026111354103617500353130ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from random import Random from hypothesis.internal.conjecture.shrinking import Integer from tests.common.utils import capture_out def test_debug_output(): with capture_out() as o: Integer.shrink(10, lambda x: True, debug=True, random=Random(0)) assert "initial=10" in o.getvalue() assert "shrinking to 0" in o.getvalue() def test_includes_name_in_repr_if_set(): assert ( repr(Integer(10, lambda x: True, name="hi there", random=Random(0))) == "Integer('hi there', initial=10, current=10)" ) def test_normally_contains_no_space_for_name(): assert ( repr(Integer(10, lambda x: True, random=Random(0))) == "Integer(initial=10, current=10)" ) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_conjecture_test_data.py000066400000000000000000000274771354103617500333470ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import itertools import pytest import hypothesis.strategies as st from hypothesis import given, strategies as st from hypothesis.errors import Frozen, InvalidArgument from hypothesis.internal.compat import hbytes, hrange from hypothesis.internal.conjecture.data import ( MAX_DEPTH, ConjectureData, DataObserver, Overrun, Status, StopTest, ) from hypothesis.searchstrategy.strategies import SearchStrategy @given(st.binary()) def test_buffer_draws_as_self(buf): x = ConjectureData.for_buffer(buf) assert x.draw_bytes(len(buf)) == buf def test_cannot_draw_after_freeze(): x = ConjectureData.for_buffer(b"hi") x.draw_bytes(1) x.freeze() with pytest.raises(Frozen): x.draw_bytes(1) def test_can_double_freeze(): x = ConjectureData.for_buffer(b"hi") x.freeze() assert x.frozen x.freeze() assert x.frozen def test_can_draw_zero_bytes(): x = ConjectureData.for_buffer(b"") for _ in range(10): assert x.draw_bytes(0) == b"" def test_draw_past_end_sets_overflow(): x = ConjectureData.for_buffer(hbytes(5)) with pytest.raises(StopTest) as e: x.draw_bytes(6) assert e.value.testcounter == x.testcounter assert x.frozen assert x.status == Status.OVERRUN def test_notes_repr(): x = ConjectureData.for_buffer(b"") x.note(b"hi") assert repr(b"hi") in x.output def test_can_mark_interesting(): x = ConjectureData.for_buffer(hbytes()) with pytest.raises(StopTest): x.mark_interesting() assert x.frozen assert x.status == Status.INTERESTING def test_drawing_zero_bits_is_free(): x = ConjectureData.for_buffer(hbytes()) assert x.draw_bits(0) == 0 def test_can_mark_invalid(): x = ConjectureData.for_buffer(hbytes()) with pytest.raises(StopTest): x.mark_invalid() assert x.frozen assert x.status == Status.INVALID class BoomStrategy(SearchStrategy): def do_draw(self, data): data.draw_bytes(1) raise ValueError() def test_closes_interval_on_error_in_strategy(): x = ConjectureData.for_buffer(b"hi") with pytest.raises(ValueError): 
x.draw(BoomStrategy()) x.freeze() assert not any(eg.end is None for eg in x.examples) class BigStrategy(SearchStrategy): def do_draw(self, data): data.draw_bytes(10 ** 6) def test_does_not_double_freeze_in_interval_close(): x = ConjectureData.for_buffer(b"hi") with pytest.raises(StopTest): x.draw(BigStrategy()) assert x.frozen assert not any(eg.end is None for eg in x.examples) def test_triviality(): d = ConjectureData.for_buffer([1, 0, 1]) d.start_example(1) d.draw_bits(1) d.draw_bits(1) d.stop_example(1) d.write(hbytes([2])) d.freeze() def eg(u, v): return [ex for ex in d.examples if ex.start == u and ex.end == v][0] assert not eg(0, 2).trivial assert not eg(0, 1).trivial assert eg(1, 2).trivial assert eg(2, 3).trivial def test_example_depth_marking(): d = ConjectureData.for_buffer(hbytes(24)) # These draw sizes are chosen so that each example has a unique length. d.draw_bytes(2) d.start_example("inner") d.draw_bytes(3) d.draw_bytes(6) d.stop_example() d.draw_bytes(12) d.freeze() assert len(d.examples) == 6 depths = {(ex.length, ex.depth) for ex in d.examples} assert depths == {(2, 1), (3, 2), (6, 2), (9, 1), (12, 1), (23, 0)} def test_has_examples_even_when_empty(): d = ConjectureData.for_buffer(hbytes()) d.draw(st.just(False)) d.freeze() assert d.examples def test_has_cached_examples_even_when_overrun(): d = ConjectureData.for_buffer(hbytes(1)) d.start_example(3) d.draw_bits(1) d.stop_example() try: d.draw_bits(1) except StopTest: pass assert d.status == Status.OVERRUN assert any(ex.label == 3 and ex.length == 1 for ex in d.examples) assert d.examples is d.examples def test_can_write_empty_string(): d = ConjectureData.for_buffer([1, 1, 1]) d.draw_bits(1) d.write(hbytes()) d.draw_bits(1) d.draw_bits(0, forced=0) d.draw_bits(1) assert d.buffer == hbytes([1, 1, 1]) def test_blocks_preserve_identity(): n = 10 d = ConjectureData.for_buffer([1] * 10) for _ in hrange(n): d.draw_bits(1) d.freeze() blocks = [d.blocks[i] for i in range(n)] result = d.as_result() for i, b 
in enumerate(blocks): assert result.blocks[i] is b def test_compact_blocks_during_generation(): d = ConjectureData.for_buffer([1] * 10) for _ in hrange(5): d.draw_bits(1) assert len(list(d.blocks)) == 5 for _ in hrange(5): d.draw_bits(1) assert len(list(d.blocks)) == 10 def test_handles_indices_like_a_list(): n = 5 d = ConjectureData.for_buffer([1] * n) for _ in hrange(n): d.draw_bits(1) assert d.blocks[-1] is d.blocks[n - 1] assert d.blocks[-n] is d.blocks[0] with pytest.raises(IndexError): d.blocks[n] with pytest.raises(IndexError): d.blocks[-n - 1] def test_can_observe_draws(): class LoggingObserver(DataObserver): def __init__(self): self.log = [] def draw_bits(self, *args): self.log.append(("draw",) + args) def conclude_test(self, *args): assert x.frozen self.log.append(("concluded",) + args) observer = LoggingObserver() x = ConjectureData.for_buffer(hbytes([1, 2, 3]), observer=observer) x.draw_bits(1) x.draw_bits(7, forced=10) x.draw_bits(8) with pytest.raises(StopTest): x.conclude_test(Status.INTERESTING, interesting_origin="neat") assert observer.log == [ ("draw", 1, False, 1), ("draw", 7, True, 10), ("draw", 8, False, 3), ("concluded", Status.INTERESTING, "neat"), ] def test_calls_concluded_implicitly(): class NoteConcluded(DataObserver): def conclude_test(self, status, reason): assert x.frozen self.conclusion = (status, reason) observer = NoteConcluded() x = ConjectureData.for_buffer(hbytes([1]), observer=observer) x.draw_bits(1) x.freeze() assert observer.conclusion == (Status.VALID, None) def test_handles_start_indices_like_a_list(): n = 5 d = ConjectureData.for_buffer([1] * n) for _ in hrange(n): d.draw_bits(1) for i in hrange(-2 * n, 2 * n + 1): try: start = d.blocks.start(i) except IndexError: # Directly retrieving the start position failed, so check that # indexing also fails. with pytest.raises(IndexError): d.blocks[i] continue # Directly retrieving the start position succeeded, so check that # indexing also succeeds, and gives the same position. 
assert start == d.blocks[i].start def test_last_block_length(): d = ConjectureData.for_buffer([0] * 15) with pytest.raises(IndexError): d.blocks.last_block_length for n in hrange(1, 5 + 1): d.draw_bits(n * 8) assert d.blocks.last_block_length == n def test_examples_show_up_as_discarded(): d = ConjectureData.for_buffer([1, 0, 1]) d.start_example(1) d.draw_bits(1) d.stop_example(discard=True) d.start_example(1) d.draw_bits(1) d.stop_example() d.freeze() assert len([ex for ex in d.examples if ex.discarded]) == 1 def test_examples_support_negative_indexing(): d = ConjectureData.for_buffer(hbytes(2)) d.draw_bits(1) d.draw_bits(1) d.freeze() assert d.examples[-1].length == 1 def test_can_override_label(): d = ConjectureData.for_buffer(hbytes(2)) d.draw(st.booleans(), label=7) d.freeze() assert any(ex.label == 7 for ex in d.examples) def test_will_mark_too_deep_examples_as_invalid(): d = ConjectureData.for_buffer(hbytes(0)) s = st.none() for _ in hrange(MAX_DEPTH + 1): s = s.map(lambda x: x) with pytest.raises(StopTest): d.draw(s) assert d.status == Status.INVALID def test_empty_strategy_is_invalid(): d = ConjectureData.for_buffer(hbytes(0)) with pytest.raises(StopTest): d.draw(st.nothing()) assert d.status == Status.INVALID def test_will_error_on_find(): d = ConjectureData.for_buffer(hbytes(0)) d.is_find = True with pytest.raises(InvalidArgument): d.draw(st.data()) def test_can_note_non_str(): d = ConjectureData.for_buffer(hbytes(0)) x = object() d.note(x) assert repr(x) in d.output def test_can_note_str_as_non_repr(): d = ConjectureData.for_buffer(hbytes(0)) d.note(u"foo") assert d.output == u"foo" def test_result_is_overrun(): d = ConjectureData.for_buffer(hbytes(0)) with pytest.raises(StopTest): d.draw_bits(1) assert d.as_result() is Overrun def test_trivial_before_force_agrees_with_trivial_after(): d = ConjectureData.for_buffer([0, 1, 1]) d.draw_bits(1) d.draw_bits(1, forced=1) d.draw_bits(1) t1 = [d.blocks.trivial(i) for i in hrange(3)] d.freeze() r = d.as_result() 
t2 = [b.trivial for b in r.blocks] assert d.blocks.owner is None t3 = [r.blocks.trivial(i) for i in hrange(3)] assert t1 == t2 == t3 def test_events_are_noted(): d = ConjectureData.for_buffer(()) d.note_event("hello") assert "hello" in d.events def test_blocks_end_points(): d = ConjectureData.for_buffer(hbytes(4)) d.draw_bits(1) d.draw_bits(16, forced=1) d.draw_bits(8) assert ( list(d.blocks.all_bounds()) == [b.bounds for b in d.blocks] == [(0, 1), (1, 3), (3, 4)] ) def test_blocks_lengths(): d = ConjectureData.for_buffer(hbytes(7)) d.draw_bits(32) d.draw_bits(16) d.draw_bits(1) assert [b.length for b in d.blocks] == [4, 2, 1] def test_child_indices(): d = ConjectureData.for_buffer(hbytes(4)) d.start_example(0) # examples[1] d.start_example(0) # examples[2] d.draw_bits(1) # examples[3] d.draw_bits(1) # examples[4] d.stop_example() d.stop_example() d.draw_bits(1) # examples[5] d.draw_bits(1) # examples[6] d.freeze() assert list(d.examples.children[0]) == [1, 5, 6] assert list(d.examples.children[1]) == [2] assert list(d.examples.children[2]) == [3, 4] assert d.examples[0].parent is None for ex in list(d.examples)[1:]: assert ex in d.examples[ex.parent].children def test_example_equality(): d = ConjectureData.for_buffer(hbytes(2)) d.start_example(0) d.draw_bits(1) d.stop_example() d.start_example(0) d.draw_bits(1) d.stop_example() d.freeze() examples = list(d.examples) for ex1, ex2 in itertools.combinations(examples, 2): assert ex1 != ex2 assert not (ex1 == ex2) for ex in examples: assert ex == ex not (ex != ex) assert not (ex == "hello") assert ex != "hello" def test_discarded_data_is_eventually_terminated(): data = ConjectureData.for_buffer(hbytes(100)) with pytest.raises(StopTest): for _ in hrange(100): data.start_example(1) data.draw_bits(1) data.stop_example(discard=True) assert data.status == Status.INVALID 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_conjecture_utils.py000066400000000000000000000102331354103617500325150ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from collections import Counter from fractions import Fraction import hypothesis.internal.conjecture.utils as cu import hypothesis.strategies as st from hypothesis import HealthCheck, Phase, assume, example, given, settings from hypothesis.internal.compat import hbytes, hrange from hypothesis.internal.conjecture.data import ConjectureData from hypothesis.internal.coverage import IN_COVERAGE_TESTS def test_does_draw_data_for_empty_range(): data = ConjectureData.for_buffer(b"\1") assert cu.integer_range(data, 1, 1) == 1 data.freeze() assert data.buffer == hbytes(b"\0") def test_uniform_float_shrinks_to_zero(): d = ConjectureData.for_buffer(hbytes([0] * 7)) assert cu.fractional_float(d) == 0.0 assert len(d.buffer) == 7 def test_uniform_float_can_draw_1(): d = ConjectureData.for_buffer(hbytes([255] * 7)) assert cu.fractional_float(d) == 1.0 assert len(d.buffer) == 7 def test_coin_biased_towards_truth(): p = 1 - 1.0 / 500 for i in range(255): assert cu.biased_coin(ConjectureData.for_buffer([i]), p) second_order = [ cu.biased_coin(ConjectureData.for_buffer([255, i]), p) for i in range(255) ] assert False in 
second_order assert True in second_order def test_coin_biased_towards_falsehood(): p = 1.0 / 500 for i in range(255): assert not cu.biased_coin(ConjectureData.for_buffer([i]), p) second_order = [ cu.biased_coin(ConjectureData.for_buffer([255, i]), p) for i in range(255) ] assert False in second_order assert True in second_order def test_unbiased_coin_has_no_second_order(): counts = Counter() for i in range(256): buf = hbytes([i]) data = ConjectureData.for_buffer(buf) result = cu.biased_coin(data, 0.5) if data.buffer == buf: counts[result] += 1 assert counts[False] == counts[True] > 0 def test_drawing_certain_coin_still_writes(): data = ConjectureData.for_buffer([0, 1]) assert not data.buffer assert cu.biased_coin(data, 1) assert data.buffer def test_drawing_impossible_coin_still_writes(): data = ConjectureData.for_buffer([1, 0]) assert not data.buffer assert not cu.biased_coin(data, 0) assert data.buffer def test_drawing_an_exact_fraction_coin(): count = 0 for i in hrange(8): if cu.biased_coin(ConjectureData.for_buffer([i]), Fraction(3, 8)): count += 1 assert count == 3 @st.composite def weights(draw): parts = draw(st.lists(st.integers())) parts.reverse() base = Fraction(1, 1) for p in parts: base = Fraction(1) / (1 + base) return base @example([Fraction(1, 3), Fraction(1, 3), Fraction(1, 3)]) @example([Fraction(1, 1), Fraction(1, 2)]) @example([Fraction(1, 2), Fraction(4, 10)]) @example([Fraction(1, 1), Fraction(3, 5), Fraction(1, 1)]) @example([Fraction(2, 257), Fraction(2, 5), Fraction(1, 11)]) @settings( deadline=None, suppress_health_check=HealthCheck.all(), phases=[Phase.explicit] if IN_COVERAGE_TESTS else tuple(Phase), ) @given(st.lists(weights(), min_size=1)) def test_sampler_distribution(weights): total = sum(weights) n = len(weights) assume(total > 0) probabilities = [w / total for w in weights] sampler = cu.Sampler(weights) calculated = [Fraction(0)] * n for base, alternate, p_alternate in sampler.table: calculated[base] += (1 - p_alternate) / n 
calculated[alternate] += p_alternate / n assert probabilities == calculated hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_control.py000066400000000000000000000102561354103617500306210ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import Verbosity, given, reporting, settings from hypothesis.control import ( BuildContext, _current_build_context, cleanup, current_build_context, event, note, ) from hypothesis.errors import CleanupFailed, InvalidArgument from hypothesis.internal.conjecture.data import ConjectureData as TD from hypothesis.strategies import integers from tests.common.utils import capture_out def bc(): return BuildContext(TD.for_buffer(b"")) def test_cannot_cleanup_with_no_context(): with pytest.raises(InvalidArgument): cleanup(lambda: None) assert _current_build_context.value is None def test_cannot_event_with_no_context(): with pytest.raises(InvalidArgument): event("hi") assert _current_build_context.value is None def test_cleanup_executes_on_leaving_build_context(): data = [] with bc(): cleanup(lambda: data.append(1)) assert not data assert data == [1] assert _current_build_context.value is None def test_can_nest_build_context(): data = [] with bc(): cleanup(lambda: data.append(1)) with bc(): cleanup(lambda: data.append(2)) assert 
not data assert data == [2] assert data == [2, 1] assert _current_build_context.value is None def test_does_not_suppress_exceptions(): with pytest.raises(AssertionError): with bc(): assert False assert _current_build_context.value is None def test_suppresses_exceptions_in_teardown(): with capture_out() as o: with pytest.raises(AssertionError): with bc(): def foo(): raise ValueError() cleanup(foo) assert False assert u"ValueError" in o.getvalue() assert _current_build_context.value is None def test_runs_multiple_cleanup_with_teardown(): with capture_out() as o: with pytest.raises(AssertionError): with bc(): def foo(): raise ValueError() cleanup(foo) def bar(): raise TypeError() cleanup(foo) cleanup(bar) assert False assert u"ValueError" in o.getvalue() assert u"TypeError" in o.getvalue() assert _current_build_context.value is None def test_raises_error_if_cleanup_fails_but_block_does_not(): with pytest.raises(CleanupFailed): with bc(): def foo(): raise ValueError() cleanup(foo) assert _current_build_context.value is None def test_raises_if_note_out_of_context(): with pytest.raises(InvalidArgument): note("Hi") def test_raises_if_current_build_context_out_of_context(): with pytest.raises(InvalidArgument): current_build_context() def test_current_build_context_is_current(): with bc() as a: assert current_build_context() is a def test_prints_all_notes_in_verbose_mode(): # slightly roundabout because @example messes with verbosity - see #1521 messages = set() @settings(verbosity=Verbosity.debug, database=None) @given(integers(1, 10)) def test(x): msg = "x -> %d" % (x,) note(msg) messages.add(msg) assert x < 5 with capture_out() as out: with reporting.with_reporter(reporting.default): with pytest.raises(AssertionError): test() v = out.getvalue() for x in sorted(messages): assert x in v hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_core.py000066400000000000000000000055211354103617500300700ustar00rootroot00000000000000# coding=utf-8 # # This file is 
part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from _pytest.outcomes import Failed, Skipped import hypothesis.strategies as s from hypothesis import find, given, reject, settings from hypothesis.errors import NoSuchExample, Unsatisfiable from tests.common.utils import checks_deprecated_behaviour @checks_deprecated_behaviour def test_stops_after_max_examples_if_satisfying(): tracker = [] def track(x): tracker.append(x) return False max_examples = 100 with pytest.raises(NoSuchExample): find(s.integers(0, 10000), track, settings=settings(max_examples=max_examples)) assert len(tracker) == max_examples @checks_deprecated_behaviour def test_stops_after_ten_times_max_examples_if_not_satisfying(): count = [0] def track(x): count[0] += 1 reject() max_examples = 100 with pytest.raises(Unsatisfiable): find(s.integers(0, 10000), track, settings=settings(max_examples=max_examples)) assert count[0] == 10 * max_examples some_normal_settings = settings() def test_is_not_normally_default(): assert settings.default is not some_normal_settings @given(s.booleans()) @some_normal_settings def test_settings_are_default_in_given(x): assert settings.default is some_normal_settings def test_given_shrinks_pytest_helper_errors(): final_value = [None] @settings(derandomize=True) @given(s.integers()) def inner(x): final_value[0] = x if x > 100: pytest.fail("x=%r is too 
big!" % x) with pytest.raises(Failed): inner() assert final_value[0] == 101 def test_pytest_skip_skips_shrinking(): values = [] @settings(derandomize=True) @given(s.integers()) def inner(x): values.append(x) if x > 100: pytest.skip("x=%r is too big!" % x) with pytest.raises(Skipped): inner() assert len([x for x in values if x > 100]) == 1 @checks_deprecated_behaviour def test_can_find_with_db_eq_none(): find(s.integers(), bool, settings(database=None)) @checks_deprecated_behaviour def test_no_such_example(): with pytest.raises(NoSuchExample): find(s.none(), bool, database_key=b"no such example") hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_custom_reprs.py000066400000000000000000000037361354103617500316730ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import given def test_includes_non_default_args_in_repr(): assert repr(st.integers()) == "integers()" assert repr(st.integers(min_value=1)) == "integers(min_value=1)" def hi(there, stuff): return there def test_supports_positional_and_keyword_args_in_builds(): assert ( repr(st.builds(hi, st.integers(), there=st.booleans())) == "builds(hi, integers(), there=booleans())" ) def test_preserves_sequence_type_of_argument(): assert repr(st.sampled_from([0])) == "sampled_from([0])" class IHaveABadRepr(object): def __repr__(self): raise ValueError("Oh no!") def test_errors_are_deferred_until_repr_is_calculated(): s = ( st.builds( lambda x, y: 1, st.just(IHaveABadRepr()), y=st.one_of(st.sampled_from((IHaveABadRepr(),)), st.just(IHaveABadRepr())), ) .map(lambda t: t) .filter(lambda t: True) .flatmap(lambda t: st.just(IHaveABadRepr())) ) with pytest.raises(ValueError): repr(s) @given(st.iterables(st.integers())) def test_iterables_repr_is_useful(it): # fairly hard-coded but useful; also ensures _values are inexhaustible assert repr(it) == "iter({!r})".format(it._values) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_database_backend.py000066400000000000000000000107021354103617500323500ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import os import pytest from hypothesis import given, settings from hypothesis.database import ( DirectoryBasedExampleDatabase, ExampleDatabase, InMemoryExampleDatabase, ) from hypothesis.strategies import binary, lists, tuples small_settings = settings(max_examples=50) @given(lists(tuples(binary(), binary()))) @small_settings def test_backend_returns_what_you_put_in(xs): backend = InMemoryExampleDatabase() mapping = {} for key, value in xs: mapping.setdefault(key, set()).add(value) backend.save(key, value) for key, values in mapping.items(): backend_contents = list(backend.fetch(key)) distinct_backend_contents = set(backend_contents) assert len(backend_contents) == len(distinct_backend_contents) assert distinct_backend_contents == set(values) def test_can_delete_keys(): backend = InMemoryExampleDatabase() backend.save(b"foo", b"bar") backend.save(b"foo", b"baz") backend.delete(b"foo", b"bar") assert list(backend.fetch(b"foo")) == [b"baz"] def test_default_database_is_in_memory(): assert isinstance(ExampleDatabase(), InMemoryExampleDatabase) def test_default_on_disk_database_is_dir(tmpdir): assert isinstance( ExampleDatabase(tmpdir.join("foo")), DirectoryBasedExampleDatabase ) def test_selects_directory_based_if_already_directory(tmpdir): path = str(tmpdir.join("hi.sqlite3")) DirectoryBasedExampleDatabase(path).save(b"foo", b"bar") assert isinstance(ExampleDatabase(path), DirectoryBasedExampleDatabase) def test_does_not_error_when_fetching_when_not_exist(tmpdir): db = DirectoryBasedExampleDatabase(tmpdir.join("examples")) db.fetch(b"foo") @pytest.fixture(scope="function", params=["memory", "directory"]) def exampledatabase(request, tmpdir): if request.param == "memory": return ExampleDatabase() if request.param == "directory": return 
DirectoryBasedExampleDatabase(str(tmpdir.join("examples"))) assert False def test_can_delete_a_key_that_is_not_present(exampledatabase): exampledatabase.delete(b"foo", b"bar") def test_can_fetch_a_key_that_is_not_present(exampledatabase): assert list(exampledatabase.fetch(b"foo")) == [] def test_saving_a_key_twice_fetches_it_once(exampledatabase): exampledatabase.save(b"foo", b"bar") exampledatabase.save(b"foo", b"bar") assert list(exampledatabase.fetch(b"foo")) == [b"bar"] def test_can_close_a_database_without_touching_it(exampledatabase): exampledatabase.close() def test_can_close_a_database_after_saving(exampledatabase): exampledatabase.save(b"foo", b"bar") def test_class_name_is_in_repr(exampledatabase): assert type(exampledatabase).__name__ in repr(exampledatabase) exampledatabase.close() def test_an_absent_value_is_present_after_it_moves(exampledatabase): exampledatabase.move(b"a", b"b", b"c") assert next(exampledatabase.fetch(b"b")) == b"c" def test_an_absent_value_is_present_after_it_moves_to_self(exampledatabase): exampledatabase.move(b"a", b"a", b"b") assert next(exampledatabase.fetch(b"a")) == b"b" def test_two_directory_databases_can_interact(tmpdir): path = str(tmpdir) db1 = DirectoryBasedExampleDatabase(path) db2 = DirectoryBasedExampleDatabase(path) db1.save(b"foo", b"bar") assert list(db2.fetch(b"foo")) == [b"bar"] db2.save(b"foo", b"bar") db2.save(b"foo", b"baz") assert sorted(db1.fetch(b"foo")) == [b"bar", b"baz"] def test_can_handle_disappearing_files(tmpdir, monkeypatch): path = str(tmpdir) db = DirectoryBasedExampleDatabase(path) db.save(b"foo", b"bar") base_listdir = os.listdir monkeypatch.setattr( os, "listdir", lambda d: base_listdir(d) + ["this-does-not-exist"] ) assert list(db.fetch(b"foo")) == [b"bar"] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_datetimes.py000066400000000000000000000073071354103617500311230ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # 
https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import datetime as dt import pytest from hypothesis import given, settings from hypothesis.internal.compat import hrange from hypothesis.strategies import dates, datetimes, timedeltas, times from tests.common.debug import find_any, minimal def test_can_find_positive_delta(): assert minimal(timedeltas(), lambda x: x.days > 0) == dt.timedelta(1) def test_can_find_negative_delta(): assert minimal( timedeltas(max_value=dt.timedelta(10 ** 6)), lambda x: x.days < 0 ) == dt.timedelta(-1) def test_can_find_on_the_second(): find_any(timedeltas(), lambda x: x.seconds == 0) def test_can_find_off_the_second(): find_any(timedeltas(), lambda x: x.seconds != 0) def test_simplifies_towards_zero_delta(): d = minimal(timedeltas()) assert d.days == d.seconds == d.microseconds == 0 def test_min_value_is_respected(): assert minimal(timedeltas(min_value=dt.timedelta(days=10))).days == 10 def test_max_value_is_respected(): assert minimal(timedeltas(max_value=dt.timedelta(days=-10))).days == -10 @given(timedeltas()) def test_single_timedelta(val): assert find_any(timedeltas(val, val)) is val def test_simplifies_towards_millenium(): d = minimal(datetimes()) assert d.year == 2000 assert d.month == d.day == 1 assert d.hour == d.minute == d.second == d.microsecond == 0 @given(datetimes()) def test_default_datetimes_are_naive(dt): assert dt.tzinfo is None def 
test_bordering_on_a_leap_year(): x = minimal( datetimes( dt.datetime.min.replace(year=2003), dt.datetime.max.replace(year=2005) ), lambda x: x.month == 2 and x.day == 29, timeout_after=60, ) assert x.year == 2004 def test_can_find_after_the_year_2000(): assert minimal(dates(), lambda x: x.year > 2000).year == 2001 def test_can_find_before_the_year_2000(): assert minimal(dates(), lambda x: x.year < 2000).year == 1999 @pytest.mark.parametrize("month", hrange(1, 13)) def test_can_find_each_month(month): find_any(dates(), lambda x: x.month == month, settings(max_examples=10 ** 6)) def test_min_year_is_respected(): assert minimal(dates(min_value=dt.date.min.replace(2003))).year == 2003 def test_max_year_is_respected(): assert minimal(dates(max_value=dt.date.min.replace(1998))).year == 1998 @given(dates()) def test_single_date(val): assert find_any(dates(val, val)) is val def test_can_find_midnight(): find_any(times(), lambda x: x.hour == x.minute == x.second == 0) def test_can_find_non_midnight(): assert minimal(times(), lambda x: x.hour != 0).hour == 1 def test_can_find_on_the_minute(): find_any(times(), lambda x: x.second == 0) def test_can_find_off_the_minute(): find_any(times(), lambda x: x.second != 0) def test_simplifies_towards_midnight(): d = minimal(times()) assert d.hour == d.minute == d.second == d.microsecond == 0 def test_can_generate_naive_time(): find_any(times(), lambda d: not d.tzinfo) @given(times()) def test_naive_times_are_naive(dt): assert dt.tzinfo is None hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_deadline.py000066400000000000000000000071101354103617500307010ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import time import pytest import hypothesis.strategies as st from hypothesis import given, settings from hypothesis.errors import DeadlineExceeded, Flaky, InvalidArgument from tests.common.utils import capture_out, fails_with def test_raises_deadline_on_slow_test(): @settings(deadline=500) @given(st.integers()) def slow(i): time.sleep(1) with pytest.raises(DeadlineExceeded): slow() @fails_with(DeadlineExceeded) @given(st.integers()) def test_slow_tests_are_errors_by_default(i): time.sleep(1) def test_non_numeric_deadline_is_an_error(): with pytest.raises(InvalidArgument): settings(deadline="3 seconds") @given(st.integers()) @settings(deadline=None) def test_slow_with_none_deadline(i): time.sleep(1) def test_raises_flaky_if_a_test_becomes_fast_on_rerun(): once = [True] @settings(deadline=500) @given(st.integers()) def test_flaky_slow(i): if once[0]: once[0] = False time.sleep(1) with pytest.raises(Flaky): test_flaky_slow() def test_deadlines_participate_in_shrinking(): @settings(deadline=500) @given(st.integers(min_value=0)) def slow_if_large(i): if i >= 1000: time.sleep(1) with capture_out() as o: with pytest.raises(DeadlineExceeded): slow_if_large() assert "slow_if_large(i=1000)" in o.getvalue() def test_keeps_you_well_above_the_deadline(): seen = set() failed_once = [False] @settings(deadline=100) @given(st.integers(0, 2000)) def slow(i): # Make sure our initial failure isn't something that immediately goes # flaky. 
if not failed_once[0]: if i * 0.9 <= 100: return else: failed_once[0] = True t = i / 1000 if i in seen: time.sleep(0.9 * t) else: seen.add(i) time.sleep(t) with pytest.raises(DeadlineExceeded): slow() def test_gives_a_deadline_specific_flaky_error_message(): once = [True] @settings(deadline=100) @given(st.integers()) def slow_once(i): if once[0]: once[0] = False time.sleep(0.2) with capture_out() as o: with pytest.raises(Flaky): slow_once() assert "Unreliable test timing" in o.getvalue() assert "took 2" in o.getvalue() @pytest.mark.parametrize("slow_strategy", [False, True]) @pytest.mark.parametrize("slow_test", [False, True]) def test_should_only_fail_a_deadline_if_the_test_is_slow(slow_strategy, slow_test): s = st.integers() if slow_strategy: s = s.map(lambda x: time.sleep(0.08)) @settings(deadline=50) @given(st.data()) def test(data): data.draw(s) if slow_test: time.sleep(0.1) if slow_test: with pytest.raises(DeadlineExceeded): test() else: test() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_debug_information.py000066400000000000000000000026721354103617500326370ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import re import pytest import hypothesis.strategies as st from hypothesis import Verbosity, given, settings from tests.common.utils import capture_out def test_reports_passes(): @given(st.integers()) @settings(verbosity=Verbosity.debug) def test(i): assert i < 10 with capture_out() as out: with pytest.raises(AssertionError): test() value = out.getvalue() assert "adaptive_example_deletion" in value assert "calls" in value assert "shrinks" in value shrinks_info = re.compile(r"call(s?) of which ([0-9]+) shrank") for l in value.splitlines(): m = shrinks_info.search(l) if m is not None and int(m.group(2)) != 0: break else: assert False, value hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_deferred_strategies.py000066400000000000000000000115001354103617500331440ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import given, strategies as st from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import hrange from tests.common.debug import assert_no_examples, minimal def test_binary_tree(): tree = st.deferred(lambda: st.integers() | st.tuples(tree, tree)) assert minimal(tree) == 0 assert minimal(tree, lambda x: isinstance(x, tuple)) == (0, 0) def test_mutual_recursion(): t = st.deferred(lambda: a | b) a = st.deferred(lambda: st.none() | st.tuples(st.just("a"), b)) b = st.deferred(lambda: st.none() | st.tuples(st.just("b"), a)) for c in ("a", "b"): assert minimal(t, lambda x: x is not None and x[0] == c) == (c, None) def test_errors_on_non_function_define(): x = st.deferred(1) with pytest.raises(InvalidArgument): x.example() def test_errors_if_define_does_not_return_search_strategy(): x = st.deferred(lambda: 1) with pytest.raises(InvalidArgument): x.example() def test_errors_on_definition_as_self(): x = st.deferred(lambda: x) with pytest.raises(InvalidArgument): x.example() def test_branches_pass_through_deferred(): x = st.one_of(st.booleans(), st.integers()) y = st.deferred(lambda: x) assert x.branches == y.branches def test_can_draw_one_of_self(): x = st.deferred(lambda: st.one_of(st.booleans(), x)) assert minimal(x) is False assert len(x.branches) == 1 def test_hidden_self_references_just_result_in_no_example(): bad = st.deferred(lambda: st.none().flatmap(lambda _: bad)) assert_no_examples(bad) def test_self_recursive_flatmap(): bad = st.deferred(lambda: bad.flatmap(lambda x: st.none())) assert_no_examples(bad) def test_self_reference_through_one_of_can_detect_emptiness(): bad = st.deferred(lambda: st.one_of(bad, bad)) assert bad.is_empty def test_self_tuple_draws_nothing(): x = st.deferred(lambda: st.tuples(x)) assert_no_examples(x) def test_mutually_recursive_tuples_draw_nothing(): x = st.deferred(lambda: st.tuples(y)) y = st.tuples(x) 
assert_no_examples(x) assert_no_examples(y) def test_literals_strategy_is_valid(): literals = st.deferred( lambda: st.one_of( st.booleans(), st.tuples(literals, literals), literals.map(lambda x: [x]) ) ) @given(literals) def test(e): pass test() assert not literals.has_reusable_values def test_impossible_self_recursion(): x = st.deferred(lambda: st.tuples(st.none(), x)) assert x.is_empty assert x.has_reusable_values def test_very_deep_deferral(): # This test is designed so that the recursive properties take a very long # time to converge: Although we can rapidly determine them for the original # value, each round in the fixed point calculation only manages to update # a single value in the related strategies, so it takes 100 rounds to # update everything. Most importantly this triggers our infinite loop # detection heuristic and we start tracking duplicates, but we shouldn't # see any because this loop isn't infinite, just long. def strat(i): if i == 0: return st.deferred(lambda: st.one_of(strategies + [st.none()])) else: return st.deferred(lambda: st.tuples(strategies[(i + 1) % len(strategies)])) strategies = list(map(strat, hrange(100))) assert strategies[0].has_reusable_values assert not strategies[0].is_empty def test_recursion_in_middle(): # This test is significant because the integers().map(abs) is not checked # in the initial pass - when we recurse into x initially we decide that # x is empty, so the tuple is empty, and don't need to check the third # argument. Then when we do the more refined test we've discovered that x # is non-empty, so we need to check the non-emptiness of the last component # to determine the non-emptiness of the tuples. 
x = st.deferred(lambda: st.tuples(st.none(), x, st.integers().map(abs)) | st.none()) assert not x.is_empty def test_deferred_supports_find(): nested = st.deferred(lambda: st.integers() | st.lists(nested)) assert nested.supports_find hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_detection.py000066400000000000000000000032371354103617500311200ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given from hypothesis.internal.detection import is_hypothesis_test from hypothesis.stateful import GenericStateMachine from hypothesis.strategies import integers def test_functions_default_to_not_tests(): def foo(): pass assert not is_hypothesis_test(foo) def test_methods_default_to_not_tests(): class Foo(object): def foo(): pass assert not is_hypothesis_test(Foo().foo) def test_detection_of_functions(): @given(integers()) def test(i): pass assert is_hypothesis_test(test) def test_detection_of_methods(): class Foo(object): @given(integers()) def test(self, i): pass assert is_hypothesis_test(Foo().test) def test_detection_of_stateful_tests(): class Stuff(GenericStateMachine): def steps(self): return integers() def execute_step(self, step): pass assert is_hypothesis_test(Stuff.TestCase().runTest) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_direct_strategies.py000066400000000000000000000410751354103617500326500ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import collections import decimal import fractions import math from datetime import date, datetime, time, timedelta import pytest import hypothesis.strategies as ds from hypothesis import given, settings from hypothesis.errors import InvalidArgument from hypothesis.vendor.pretty import pretty from tests.common.debug import minimal from tests.common.utils import checks_deprecated_behaviour # Use `pretty` instead of `repr` for building test names, so that set and dict # parameters print consistently across multiple worker processes with different # PYTHONHASHSEED values. def fn_test(*fnkwargs): fnkwargs = list(fnkwargs) return pytest.mark.parametrize( ("fn", "args"), fnkwargs, ids=[ "%s(%s)" % (fn.__name__, ", ".join(map(pretty, args))) for fn, args in fnkwargs ], ) def fn_ktest(*fnkwargs): fnkwargs = list(fnkwargs) return pytest.mark.parametrize( ("fn", "kwargs"), fnkwargs, ids=["%s(**%s)" % (fn.__name__, pretty(kwargs)) for fn, kwargs in fnkwargs], ) @fn_ktest( (ds.integers, {"min_value": float("nan")}), (ds.integers, {"min_value": 2, "max_value": 1}), (ds.integers, {"min_value": float("nan")}), (ds.integers, {"max_value": float("nan")}), (ds.dates, {"min_value": "fish"}), (ds.dates, {"max_value": "fish"}), (ds.dates, {"min_value": date(2017, 8, 22), "max_value": date(2017, 8, 21)}), (ds.datetimes, {"min_value": "fish"}), (ds.datetimes, {"max_value": "fish"}), ( ds.datetimes, {"min_value": datetime(2017, 8, 22), "max_value": datetime(2017, 8, 21)}, ), (ds.decimals, {"min_value": float("nan")}), (ds.decimals, {"max_value": float("nan")}), (ds.decimals, {"min_value": 2, "max_value": 1}), (ds.decimals, {"max_value": "-snan"}), (ds.decimals, {"max_value": complex(1, 2)}), (ds.decimals, {"places": -1}), (ds.decimals, {"places": 0.5}), (ds.decimals, {"max_value": 0.0, "min_value": 1.0}), (ds.decimals, {"min_value": 1.0, "max_value": 0.0}), (ds.decimals, {"min_value": 0.0, "max_value": 1.0, 
"allow_infinity": True}), (ds.decimals, {"min_value": "inf"}), (ds.decimals, {"max_value": "-inf"}), (ds.decimals, {"min_value": "-inf", "allow_infinity": False}), (ds.decimals, {"max_value": "inf", "allow_infinity": False}), (ds.decimals, {"min_value": complex(1, 2)}), (ds.decimals, {"min_value": "0.1", "max_value": "0.9", "places": 0}), ( ds.dictionaries, {"keys": ds.booleans(), "values": ds.booleans(), "min_size": 10, "max_size": 1}, ), (ds.floats, {"min_value": float("nan")}), (ds.floats, {"max_value": float("nan")}), (ds.floats, {"min_value": complex(1, 2)}), (ds.floats, {"max_value": complex(1, 2)}), (ds.floats, {"exclude_min": None}), (ds.floats, {"exclude_max": None}), (ds.floats, {"exclude_min": True}), # because min_value=None (ds.floats, {"exclude_max": True}), # because max_value=None (ds.fractions, {"min_value": 2, "max_value": 1}), (ds.fractions, {"min_value": float("nan")}), (ds.fractions, {"max_value": float("nan")}), (ds.fractions, {"max_denominator": 0}), (ds.fractions, {"max_denominator": 1.5}), (ds.fractions, {"min_value": complex(1, 2)}), (ds.lists, {"elements": ds.integers(), "min_size": 10, "max_size": 9}), (ds.lists, {"elements": ds.integers(), "min_size": -10, "max_size": -9}), (ds.lists, {"elements": ds.integers(), "max_size": -9}), (ds.lists, {"elements": ds.integers(), "min_size": -10}), (ds.lists, {"elements": ds.integers(), "min_size": float("nan")}), (ds.lists, {"elements": ds.nothing(), "max_size": 1}), (ds.lists, {"elements": "hi"}), (ds.lists, {"elements": ds.integers(), "unique_by": 1}), (ds.lists, {"elements": ds.integers(), "unique_by": ()}), (ds.lists, {"elements": ds.integers(), "unique_by": (1,)}), (ds.lists, {"elements": ds.sampled_from([0, 1]), "min_size": 3, "unique": True}), (ds.text, {"min_size": 10, "max_size": 9}), (ds.text, {"alphabet": [1]}), (ds.text, {"alphabet": ["abc"]}), (ds.binary, {"min_size": 10, "max_size": 9}), (ds.floats, {"min_value": float("nan")}), (ds.floats, {"min_value": "0"}), (ds.floats, 
{"max_value": "0"}), (ds.floats, {"max_value": 0.0, "min_value": 1.0}), (ds.floats, {"min_value": 0.0, "allow_nan": True}), (ds.floats, {"max_value": 0.0, "allow_nan": True}), (ds.floats, {"min_value": 0.0, "max_value": 1.0, "allow_infinity": True}), (ds.floats, {"min_value": float("inf"), "allow_infinity": False}), (ds.floats, {"max_value": float("-inf"), "allow_infinity": False}), (ds.complex_numbers, {"min_magnitude": float("nan")}), (ds.complex_numbers, {"max_magnitude": float("nan")}), (ds.complex_numbers, {"max_magnitude": complex(1, 2)}), (ds.complex_numbers, {"min_magnitude": -1}), (ds.complex_numbers, {"max_magnitude": -1}), (ds.complex_numbers, {"min_magnitude": 3, "max_magnitude": 2}), (ds.complex_numbers, {"max_magnitude": 2, "allow_infinity": True}), (ds.complex_numbers, {"max_magnitude": 2, "allow_nan": True}), (ds.fixed_dictionaries, {"mapping": "fish"}), (ds.fixed_dictionaries, {"mapping": {1: "fish"}}), (ds.dictionaries, {"keys": ds.integers(), "values": 1}), (ds.dictionaries, {"keys": 1, "values": ds.integers()}), (ds.text, {"alphabet": "", "min_size": 1}), (ds.timedeltas, {"min_value": "fish"}), (ds.timedeltas, {"max_value": "fish"}), ( ds.timedeltas, {"min_value": timedelta(hours=1), "max_value": timedelta(minutes=1)}, ), (ds.times, {"min_value": "fish"}), (ds.times, {"max_value": "fish"}), (ds.times, {"min_value": time(2, 0), "max_value": time(1, 0)}), (ds.uuids, {"version": 6}), (ds.characters, {"min_codepoint": -1}), (ds.characters, {"min_codepoint": "1"}), (ds.characters, {"max_codepoint": -1}), (ds.characters, {"max_codepoint": "1"}), (ds.characters, {"whitelist_categories": []}), (ds.characters, {"whitelist_categories": ["Nd"], "blacklist_categories": ["Nd"]}), (ds.slices, {"size": 0}), (ds.slices, {"size": None}), (ds.slices, {"size": "chips"}), (ds.slices, {"size": -1}), (ds.slices, {"size": 2.3}), ) def test_validates_keyword_arguments(fn, kwargs): with pytest.raises(InvalidArgument): fn(**kwargs).example() @fn_ktest( (ds.integers, 
{"min_value": 0}), (ds.integers, {"min_value": 11}), (ds.integers, {"min_value": 11, "max_value": 100}), (ds.integers, {"max_value": 0}), (ds.integers, {"min_value": -2, "max_value": -1}), (ds.decimals, {"min_value": 1.0, "max_value": 1.5}), (ds.decimals, {"min_value": "1.0", "max_value": "1.5"}), (ds.decimals, {"min_value": decimal.Decimal("1.5")}), (ds.decimals, {"max_value": 1.0, "min_value": -1.0, "allow_infinity": False}), (ds.decimals, {"min_value": 1.0, "allow_nan": False}), (ds.decimals, {"max_value": 1.0, "allow_nan": False}), (ds.decimals, {"max_value": 1.0, "min_value": -1.0, "allow_nan": False}), (ds.decimals, {"min_value": "-inf"}), (ds.decimals, {"max_value": "inf"}), (ds.fractions, {"min_value": -1, "max_value": 1, "max_denominator": 1000}), (ds.fractions, {"min_value": 1, "max_value": 1}), (ds.fractions, {"min_value": 1, "max_value": 1, "max_denominator": 2}), (ds.fractions, {"min_value": 1.0}), (ds.fractions, {"min_value": decimal.Decimal("1.0")}), (ds.fractions, {"min_value": fractions.Fraction(1, 2)}), (ds.fractions, {"min_value": "1/2", "max_denominator": 2}), (ds.fractions, {"max_value": "1/2", "max_denominator": 3}), (ds.lists, {"elements": ds.nothing(), "max_size": 0}), (ds.lists, {"elements": ds.integers()}), (ds.lists, {"elements": ds.integers(), "max_size": 5}), (ds.lists, {"elements": ds.booleans(), "min_size": 5}), (ds.lists, {"elements": ds.booleans(), "min_size": 5, "max_size": 10}), (ds.sets, {"min_size": 10, "max_size": 10, "elements": ds.integers()}), (ds.booleans, {}), (ds.just, {"value": "hi"}), (ds.integers, {"min_value": 12, "max_value": 12}), (ds.floats, {}), (ds.floats, {"min_value": 1.0}), (ds.floats, {"max_value": 1.0}), (ds.floats, {"min_value": float("inf")}), (ds.floats, {"max_value": float("-inf")}), (ds.floats, {"max_value": 1.0, "min_value": -1.0}), (ds.floats, {"max_value": 1.0, "min_value": -1.0, "allow_infinity": False}), (ds.floats, {"min_value": 1.0, "allow_nan": False}), (ds.floats, {"max_value": 1.0, 
"allow_nan": False}), (ds.floats, {"max_value": 1.0, "min_value": -1.0, "allow_nan": False}), (ds.complex_numbers, {}), (ds.complex_numbers, {"min_magnitude": 3, "max_magnitude": 3}), (ds.complex_numbers, {"max_magnitude": 0}), (ds.complex_numbers, {"allow_nan": True}), (ds.complex_numbers, {"allow_nan": True, "allow_infinity": True}), (ds.complex_numbers, {"allow_nan": True, "allow_infinity": False}), (ds.complex_numbers, {"allow_nan": False}), (ds.complex_numbers, {"allow_nan": False, "allow_infinity": True}), (ds.complex_numbers, {"allow_nan": False, "allow_infinity": False}), (ds.complex_numbers, {"max_magnitude": float("inf"), "allow_infinity": True}), (ds.sampled_from, {"elements": [1]}), (ds.sampled_from, {"elements": [1, 2, 3]}), (ds.fixed_dictionaries, {"mapping": {1: ds.integers()}}), (ds.dictionaries, {"keys": ds.booleans(), "values": ds.integers()}), (ds.text, {"alphabet": "abc"}), (ds.text, {"alphabet": set("abc")}), (ds.text, {"alphabet": ""}), (ds.text, {"alphabet": ds.sampled_from("abc")}), (ds.characters, {"whitelist_categories": ["N"]}), (ds.characters, {"blacklist_categories": []}), ) def test_produces_valid_examples_from_keyword(fn, kwargs): fn(**kwargs).example() @fn_test((ds.one_of, (1,)), (ds.tuples, (1,))) def test_validates_args(fn, args): with pytest.raises(InvalidArgument): fn(*args).example() @fn_test( (ds.one_of, (ds.booleans(), ds.tuples(ds.booleans()))), (ds.one_of, (ds.booleans(),)), (ds.text, ()), (ds.binary, ()), (ds.builds, (lambda x, y: x + y, ds.integers(), ds.integers())), ) def test_produces_valid_examples_from_args(fn, args): fn(*args).example() def test_build_class_with_target_kwarg(): NamedTupleWithTargetField = collections.namedtuple("Something", ["target"]) ds.builds(NamedTupleWithTargetField, target=ds.integers()).example() def test_builds_raises_with_no_target(): with pytest.raises(InvalidArgument): ds.builds().example() @pytest.mark.parametrize("non_callable", [1, "abc", ds.integers()]) def 
test_builds_raises_if_non_callable_as_target_kwarg(non_callable): with pytest.raises(InvalidArgument): ds.builds(target=non_callable).example() @pytest.mark.parametrize("non_callable", [1, "abc", ds.integers()]) def test_builds_raises_if_non_callable_as_first_arg(non_callable): # If there are any positional arguments, then the target (which must be # callable) must be specified as the first one. with pytest.raises(InvalidArgument): ds.builds(non_callable, target=lambda x: x).example() def test_tuples_raise_error_on_bad_kwargs(): with pytest.raises(TypeError): ds.tuples(stuff="things") @given(ds.lists(ds.booleans(), min_size=10, max_size=10)) def test_has_specified_length(xs): assert len(xs) == 10 @given(ds.integers(max_value=100)) @settings(max_examples=100) def test_has_upper_bound(x): assert x <= 100 @given(ds.integers(min_value=100)) def test_has_lower_bound(x): assert x >= 100 @given(ds.integers(min_value=1, max_value=2)) def test_is_in_bounds(x): assert 1 <= x <= 2 @given(ds.fractions(min_value=-1, max_value=1, max_denominator=1000)) def test_fraction_is_in_bounds(x): assert -1 <= x <= 1 and abs(x.denominator) <= 1000 @given(ds.fractions(min_value=fractions.Fraction(1, 2))) def test_fraction_gt_positive(x): assert fractions.Fraction(1, 2) <= x @given(ds.fractions(max_value=fractions.Fraction(-1, 2))) def test_fraction_lt_negative(x): assert x <= fractions.Fraction(-1, 2) @given(ds.decimals(min_value=-1.5, max_value=1.5, allow_nan=False)) def test_decimal_is_in_bounds(x): # decimal.Decimal("-1.5") == -1.5 (not explicitly testable in py2.6) assert decimal.Decimal("-1.5") <= x <= decimal.Decimal("1.5") def test_float_can_find_max_value_inf(): assert minimal(ds.floats(max_value=float("inf")), lambda x: math.isinf(x)) == float( "inf" ) assert minimal(ds.floats(min_value=0.0), lambda x: math.isinf(x)) == float("inf") def test_float_can_find_min_value_inf(): minimal(ds.floats(), lambda x: x < 0 and math.isinf(x)) minimal(ds.floats(min_value=float("-inf"), 
max_value=0.0), lambda x: math.isinf(x)) def test_can_find_none_list(): assert minimal(ds.lists(ds.none()), lambda x: len(x) >= 3) == [None] * 3 def test_fractions(): assert minimal(ds.fractions(), lambda f: f >= 1) == 1 def test_decimals(): assert minimal(ds.decimals(), lambda f: f.is_finite() and f >= 1) == 1 def test_non_float_decimal(): minimal(ds.decimals(), lambda d: d.is_finite() and decimal.Decimal(float(d)) != d) def test_produces_dictionaries_of_at_least_minimum_size(): t = minimal( ds.dictionaries(ds.booleans(), ds.integers(), min_size=2), lambda x: True ) assert t == {False: 0, True: 0} @given(ds.dictionaries(ds.integers(), ds.integers(), max_size=5)) @settings(max_examples=50) def test_dictionaries_respect_size(d): assert len(d) <= 5 @given(ds.dictionaries(ds.integers(), ds.integers(), max_size=0)) @settings(max_examples=50) def test_dictionaries_respect_zero_size(d): assert len(d) <= 5 @given(ds.lists(ds.none(), max_size=5)) def test_none_lists_respect_max_size(ls): assert len(ls) <= 5 @given(ds.lists(ds.none(), max_size=5, min_size=1)) def test_none_lists_respect_max_and_min_size(ls): assert 1 <= len(ls) <= 5 @given(ds.iterables(ds.integers(), max_size=5, min_size=1)) def test_iterables_are_exhaustible(it): for _ in it: pass with pytest.raises(StopIteration): next(it) def test_minimal_iterable(): assert list(minimal(ds.iterables(ds.integers()), lambda x: True)) == [] @checks_deprecated_behaviour @pytest.mark.parametrize( "fn,kwargs", [ (ds.integers, {"min_value": decimal.Decimal("1.5")}), (ds.integers, {"max_value": decimal.Decimal("1.5")}), (ds.integers, {"min_value": -1.5, "max_value": -0.5}), (ds.floats, {"min_value": 1.8, "width": 32}), (ds.floats, {"max_value": 1.8, "width": 32}), (ds.fractions, {"min_value": "1/3", "max_value": "1/2", "max_denominator": 2}), (ds.fractions, {"min_value": "0", "max_value": "1/3", "max_denominator": 2}), ], ) def test_disallowed_bounds_are_deprecated(fn, kwargs): fn(**kwargs).example() @checks_deprecated_behaviour 
@pytest.mark.parametrize( "fn,kwargs", [(ds.integers, {"min_value": 0.1, "max_value": 0.2})] ) def test_no_integers_in_bounds(fn, kwargs): with pytest.raises(InvalidArgument): fn(**kwargs).example() @checks_deprecated_behaviour @pytest.mark.parametrize( "fn,kwargs", [(ds.fractions, {"min_value": "1/3", "max_value": "1/3", "max_denominator": 2})], ) def test_no_fractions_in_bounds(fn, kwargs): with pytest.raises(InvalidArgument): fn(**kwargs).example() @pytest.mark.parametrize("parameter_name", ["min_value", "max_value"]) @pytest.mark.parametrize("value", [-1, 0, 1]) def test_no_infinity_for_min_max_values(value, parameter_name): kwargs = {"allow_infinity": False, parameter_name: value} @given(ds.floats(**kwargs)) def test_not_infinite(xs): assert not math.isinf(xs) test_not_infinite() @pytest.mark.parametrize("parameter_name", ["min_value", "max_value"]) @pytest.mark.parametrize("value", [-1, 0, 1]) def test_no_nan_for_min_max_values(value, parameter_name): kwargs = {"allow_nan": False, parameter_name: value} @given(ds.floats(**kwargs)) def test_not_nan(xs): assert not math.isnan(xs) test_not_nan() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_draw_example.py000066400000000000000000000020701354103617500316040ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis.strategies import lists from tests.common import standard_types @pytest.mark.parametrize(u"spec", standard_types, ids=list(map(repr, standard_types))) def test_single_example(spec): spec.example() @pytest.mark.parametrize(u"spec", standard_types, ids=list(map(repr, standard_types))) def test_list_example(spec): lists(spec).example() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_error_in_draw.py000066400000000000000000000022051354103617500317700ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import given from tests.common.utils import capture_out def test_error_is_in_finally(): @given(st.data()) def test(d): try: d.draw(st.lists(st.integers(), min_size=3, unique=True)) finally: raise ValueError() with capture_out() as o: with pytest.raises(ValueError): test() assert "[0, 1, -1]" in o.getvalue() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_escalation.py000066400000000000000000000035431354103617500312640ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.internal.escalation as esc import hypothesis.strategies as st from hypothesis import given def test_does_not_escalate_errors_in_non_hypothesis_file(): try: assert False except AssertionError: esc.escalate_hypothesis_internal_error() def test_does_escalate_errors_in_hypothesis_file(monkeypatch): monkeypatch.setattr(esc, "is_hypothesis_file", lambda x: True) with pytest.raises(AssertionError): try: assert False except AssertionError: esc.escalate_hypothesis_internal_error() def test_does_not_escalate_errors_in_hypothesis_file_if_disabled(monkeypatch): monkeypatch.setattr(esc, "is_hypothesis_file", lambda x: True) monkeypatch.setattr(esc, "PREVENT_ESCALATION", True) try: assert False except AssertionError: esc.escalate_hypothesis_internal_error() def test_immediately_escalates_errors_in_generation(): count = [0] def explode(s): count[0] += 1 raise ValueError() @given(st.integers().map(explode)) def test(i): pass with pytest.raises(ValueError): test() assert count == [1] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_example.py000066400000000000000000000036571354103617500306030ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from decimal import Decimal from random import Random import pytest import hypothesis.strategies as st from hypothesis import example, find, given from hypothesis.errors import HypothesisException, Unsatisfiable from tests.common.utils import checks_deprecated_behaviour, fails_with @checks_deprecated_behaviour def test_deterministic_examples_are_deprecated(): st.integers().example(Random()) def test_example_of_none_is_none(): assert st.none().example() is None def test_exception_in_compare_can_still_have_example(): st.one_of(st.none().map(lambda n: Decimal("snan")), st.just(Decimal(0))).example() def test_does_not_always_give_the_same_example(): s = st.integers() assert len({s.example() for _ in range(100)}) >= 10 def test_raises_on_no_examples(): with pytest.raises(Unsatisfiable): st.nothing().example() @fails_with(HypothesisException) @example(False) @given(st.booleans()) def test_example_inside_given(b): st.integers().example() @fails_with(HypothesisException) def test_example_inside_find(): find(st.integers(0, 100), lambda x: st.integers().example()) @fails_with(HypothesisException) def test_example_inside_strategy(): st.booleans().map(lambda x: st.integers().example()).example() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_executors.py000066400000000000000000000042551354103617500311640ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import inspect from unittest import TestCase import pytest from hypothesis import example, given from hypothesis.executors import ConjectureRunner from hypothesis.strategies import booleans, integers def test_must_use_result_of_test(): class DoubleRun(object): def execute_example(self, function): x = function() if inspect.isfunction(x): return x() @given(booleans()) def boom(self, b): def f(): raise ValueError() return f with pytest.raises(ValueError): DoubleRun().boom() class TestTryReallyHard(TestCase): @given(integers()) def test_something(self, i): pass def execute_example(self, f): f() return f() class Valueless(object): def execute_example(self, f): try: return f() except ValueError: return None @given(integers()) @example(1) def test_no_boom_on_example(self, x): raise ValueError() @given(integers()) def test_no_boom(self, x): raise ValueError() @given(integers()) def test_boom(self, x): assert False def test_boom(): with pytest.raises(AssertionError): Valueless().test_boom() def test_no_boom(): Valueless().test_no_boom() def test_no_boom_on_example(): Valueless().test_no_boom_on_example() class TestNormal(ConjectureRunner, TestCase): @given(booleans()) def test_stuff(self, b): pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_explicit_examples.py000066400000000000000000000134701354103617500326610ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from unittest import TestCase import pytest from hypothesis import Phase, Verbosity, example, given, note, reporting, settings from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import integer_types, print_unicode from hypothesis.strategies import integers, text from tests.common.utils import capture_out class TestInstanceMethods(TestCase): @given(integers()) @example(1) def test_hi_1(self, x): assert isinstance(x, integer_types) @given(integers()) @example(x=1) def test_hi_2(self, x): assert isinstance(x, integer_types) @given(x=integers()) @example(x=1) def test_hi_3(self, x): assert isinstance(x, integer_types) def test_kwarg_example_on_testcase(): class Stuff(TestCase): @given(integers()) @example(x=1) def test_hi(self, x): assert isinstance(x, integer_types) Stuff(u"test_hi").test_hi() def test_errors_when_run_with_not_enough_args(): @given(integers(), int) @example(1) def foo(x, y): pass with pytest.raises(TypeError): foo() def test_errors_when_run_with_not_enough_kwargs(): @given(integers(), int) @example(x=1) def foo(x, y): pass with pytest.raises(TypeError): foo() def test_can_use_examples_after_given(): long_str = u"This is a very long string that you've no chance of hitting" @example(long_str) @given(text()) def test_not_long_str(x): assert x != long_str with pytest.raises(AssertionError): test_not_long_str() def test_can_use_examples_before_given(): long_str = u"This is a very long string that you've no chance of hitting" @given(text()) @example(long_str) def test_not_long_str(x): assert x != long_str with pytest.raises(AssertionError): test_not_long_str() def test_can_use_examples_around_given(): long_str = u"This is a very long string that you've 
no chance of hitting" short_str = u"Still no chance" seen = [] @example(short_str) @given(text()) @example(long_str) def test_not_long_str(x): seen.append(x) test_not_long_str() assert set(seen[:2]) == {long_str, short_str} @pytest.mark.parametrize((u"x", u"y"), [(1, False), (2, True)]) @example(z=10) @given(z=integers()) def test_is_a_thing(x, y, z): pass def test_no_args_and_kwargs(): with pytest.raises(InvalidArgument): example(1, y=2) def test_no_empty_examples(): with pytest.raises(InvalidArgument): example() def test_does_not_print_on_explicit_examples_if_no_failure(): @example(1) @given(integers()) def test_positive(x): assert x > 0 with reporting.with_reporter(reporting.default): with pytest.raises(AssertionError): with capture_out() as out: test_positive() out = out.getvalue() assert u"Falsifying example: test_positive(1)" not in out def test_prints_output_for_explicit_examples(): @example(-1) @given(integers()) def test_positive(x): assert x > 0 with reporting.with_reporter(reporting.default): with pytest.raises(AssertionError): with capture_out() as out: test_positive() out = out.getvalue() assert u"Falsifying example: test_positive(x=-1)" in out def test_prints_verbose_output_for_explicit_examples(): @settings(verbosity=Verbosity.verbose) @example("NOT AN INTEGER") @given(integers()) def test_always_passes(x): pass with reporting.with_reporter(reporting.default): with capture_out() as out: test_always_passes() out = out.getvalue() assert u"Trying example: test_always_passes(x='NOT AN INTEGER')" in out def test_captures_original_repr_of_example(): @example(x=[]) @given(integers()) def test_mutation(x): x.append(1) assert not x with reporting.with_reporter(reporting.default): with pytest.raises(AssertionError): with capture_out() as out: test_mutation() out = out.getvalue() assert u"Falsifying example: test_mutation(x=[])" in out def test_examples_are_tried_in_order(): @example(x=1) @example(x=2) @given(integers()) @settings(phases=[Phase.explicit]) 
@example(x=3) def test(x): print_unicode(u"x -> %d" % (x,)) with capture_out() as out: with reporting.with_reporter(reporting.default): test() ls = out.getvalue().splitlines() assert ls == [u"x -> 1", u"x -> 2", u"x -> 3"] def test_prints_note_in_failing_example(): @example(x=42) @example(x=43) @given(integers()) def test(x): note("x -> %d" % (x,)) assert x == 42 with capture_out() as out: with reporting.with_reporter(reporting.default): with pytest.raises(AssertionError): test() v = out.getvalue() print_unicode(v) assert "x -> 43" in v assert "x -> 42" not in v def test_must_agree_with_number_of_arguments(): @example(1, 2) @given(integers()) def test(a): pass with pytest.raises(InvalidArgument): test() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_filestorage.py000066400000000000000000000033561354103617500314500ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os import hypothesis.configuration as fs previous_home_dir = None def setup_function(function): global previous_home_dir previous_home_dir = fs.hypothesis_home_dir() fs.set_hypothesis_home_dir(None) def teardown_function(function): global previous_home_dir fs.set_hypothesis_home_dir(previous_home_dir) previous_home_dir = None def test_defaults_to_the_default(): assert fs.hypothesis_home_dir() == fs.__hypothesis_home_directory_default def test_can_set_homedir_and_it_will_exist(tmpdir): fs.set_hypothesis_home_dir(str(tmpdir.mkdir(u"kittens"))) d = fs.hypothesis_home_dir() assert u"kittens" in d assert os.path.exists(d) def test_will_pick_up_location_from_env(monkeypatch, tmpdir): tmpdir = str(tmpdir) monkeypatch.setattr(os, "environ", {"HYPOTHESIS_STORAGE_DIRECTORY": tmpdir}) assert fs.hypothesis_home_dir() == tmpdir def test_storage_directories_are_created_automatically(tmpdir): fs.set_hypothesis_home_dir(str(tmpdir)) assert os.path.exists(fs.storage_directory(u"badgers")) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_flakiness.py000066400000000000000000000067301354103617500311220ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import HealthCheck, Verbosity, assume, example, given, reject, settings from hypothesis.errors import Flaky, Unsatisfiable, UnsatisfiedAssumption from hypothesis.internal.conjecture.engine import MIN_TEST_CALLS from hypothesis.strategies import booleans, composite, integers, lists, random_module from tests.common.utils import no_shrink class Nope(Exception): pass def test_fails_only_once_is_flaky(): first_call = [True] @given(integers()) def rude(x): if first_call[0]: first_call[0] = False raise Nope() with pytest.raises(Flaky): rude() def test_gives_flaky_error_if_assumption_is_flaky(): seen = set() @given(integers()) @settings(verbosity=Verbosity.quiet) def oops(s): assume(s not in seen) seen.add(s) assert False with pytest.raises(Flaky): oops() def test_does_not_attempt_to_shrink_flaky_errors(): values = [] @settings(database=None) @given(integers()) def test(x): values.append(x) assert len(values) != 1 with pytest.raises(Flaky): test() # We try a total of ten calls in the generation phase, each usually a # unique value, looking briefly (and unsuccessfully) for another bug. assert 1 < len(set(values)) <= MIN_TEST_CALLS # We don't try any new values while shrinking, just execute the test # twice more (to check for flakiness and to raise the bug to the user). 
assert set(values) == set(values[:-2]) class SatisfyMe(Exception): pass @composite def single_bool_lists(draw): n = draw(integers(0, 20)) result = [False] * (n + 1) result[n] = True return result @example([True, False, False, False], [3], None) @example([False, True, False, False], [3], None) @example([False, False, True, False], [3], None) @example([False, False, False, True], [3], None) @settings(deadline=None) @given(lists(booleans()) | single_bool_lists(), lists(integers(1, 3)), random_module()) def test_failure_sequence_inducing(building, testing, rnd): buildit = iter(building) testit = iter(testing) def build(x): try: assume(not next(buildit)) except StopIteration: pass return x @given(integers().map(build)) @settings( verbosity=Verbosity.quiet, database=None, suppress_health_check=HealthCheck.all(), phases=no_shrink, ) def test(x): try: i = next(testit) except StopIteration: return if i == 1: return elif i == 2: reject() else: raise Nope() try: test() except (Nope, Unsatisfiable, Flaky): pass except UnsatisfiedAssumption: raise SatisfyMe() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_float_nastiness.py000066400000000000000000000224431354103617500323360ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import math import sys import warnings from decimal import Decimal import pytest import hypothesis.strategies as st from hypothesis import assume, given from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import CAN_PACK_HALF_FLOAT, WINDOWS from hypothesis.internal.floats import ( float_to_int, int_to_float, is_negative, next_down, next_up, ) from tests.common.debug import find_any, minimal from tests.common.utils import checks_deprecated_behaviour try: import numpy except ImportError: numpy = None @pytest.mark.parametrize( ("lower", "upper"), [ # Exact values don't matter, but they're large enough so that x + y = inf. (9.9792015476736e291, 1.7976931348623157e308), (-sys.float_info.max, sys.float_info.max), ], ) def test_floats_are_in_range(lower, upper): @given(st.floats(lower, upper)) def test_is_in_range(t): assert lower <= t <= upper test_is_in_range() @pytest.mark.parametrize("sign", [-1, 1]) def test_can_generate_both_zeros(sign): assert minimal(st.floats(), lambda x: math.copysign(1, x) == sign) == sign * 0.0 @pytest.mark.parametrize( (u"l", u"r"), [(-1.0, 1.0), (-0.0, 1.0), (-1.0, 0.0), (-sys.float_info.min, sys.float_info.min)], ) @pytest.mark.parametrize("sign", [-1, 1]) def test_can_generate_both_zeros_when_in_interval(l, r, sign): assert minimal(st.floats(l, r), lambda x: math.copysign(1, x) == sign) == sign * 0.0 @given(st.floats(0.0, 1.0)) def test_does_not_generate_negative_if_right_boundary_is_positive(x): assert math.copysign(1, x) == 1 @given(st.floats(-1.0, -0.0)) def test_does_not_generate_positive_if_right_boundary_is_negative(x): assert math.copysign(1, x) == -1 def test_half_bounded_generates_zero(): find_any(st.floats(min_value=-1.0), lambda x: x == 0.0) find_any(st.floats(max_value=1.0), lambda x: x == 0.0) @pytest.mark.xfail( WINDOWS, reason=("Seems to be triggering a floating point bug on 2.7 + windows + x64"), ) 
@given(st.floats(max_value=-0.0)) def test_half_bounded_respects_sign_of_upper_bound(x): assert math.copysign(1, x) == -1 @given(st.floats(min_value=0.0)) def test_half_bounded_respects_sign_of_lower_bound(x): assert math.copysign(1, x) == 1 @given(st.floats(allow_nan=False)) def test_filter_nan(x): assert not math.isnan(x) @given(st.floats(allow_infinity=False)) def test_filter_infinity(x): assert not math.isinf(x) def test_can_guard_against_draws_of_nan(): """In this test we create a NaN value that naturally "tries" to shrink into the first strategy, where it is not permitted. This tests a case that is very unlikely to happen in random generation: When the unconstrained first branch of generating a float just happens to produce a NaN value. Here what happens is that we get a NaN from the *second* strategy, but this then shrinks into its unconstrained branch. The natural thing to happen is then to try to zero the branch parameter of the one_of, but that will put an illegal value there, so it's not allowed to happen. 
""" tagged_floats = st.one_of( st.tuples(st.just(0), st.floats(allow_nan=False)), st.tuples(st.just(1), st.floats(allow_nan=True)), ) tag, f = minimal(tagged_floats, lambda x: math.isnan(x[1])) assert tag == 1 def test_very_narrow_interval(): upper_bound = -1.0 lower_bound = int_to_float(float_to_int(upper_bound) + 10) assert lower_bound < upper_bound @given(st.floats(lower_bound, upper_bound)) def test(f): assert lower_bound <= f <= upper_bound test() @given(st.floats()) def test_up_means_greater(x): hi = next_up(x) if not x < hi: assert ( (math.isnan(x) and math.isnan(hi)) or (x > 0 and math.isinf(x)) or (x == hi == 0 and is_negative(x) and not is_negative(hi)) ) @given(st.floats()) def test_down_means_lesser(x): lo = next_down(x) if not x > lo: assert ( (math.isnan(x) and math.isnan(lo)) or (x < 0 and math.isinf(x)) or (x == lo == 0 and is_negative(lo) and not is_negative(x)) ) @given(st.floats(allow_nan=False, allow_infinity=False)) def test_updown_roundtrip(val): assert val == next_up(next_down(val)) assert val == next_down(next_up(val)) @checks_deprecated_behaviour @pytest.mark.parametrize("xhi", [True, False]) @pytest.mark.parametrize("xlo", [True, False]) @given(st.data(), st.floats(allow_nan=False, allow_infinity=False).filter(bool)) def test_floats_in_tiny_interval_within_bounds(xlo, xhi, data, center): assume(not (math.isinf(next_down(center)) or math.isinf(next_up(center)))) lo = Decimal.from_float(next_down(center)).next_plus() hi = Decimal.from_float(next_up(center)).next_minus() assert float(lo) < lo < center < hi < float(hi) val = data.draw(st.floats(lo, hi, exclude_min=xlo, exclude_max=xhi)) assert lo < val < hi @checks_deprecated_behaviour def test_float_free_interval_is_invalid(): lo = (2 ** 54) + 1 hi = lo + 2 assert float(lo) < lo < hi < float(hi), "There are no floats in [lo .. 
hi]" with pytest.raises(InvalidArgument): st.floats(lo, hi).example() @given(st.floats(width=32, allow_infinity=False)) def test_float32_can_exclude_infinity(x): assert not math.isinf(x) @pytest.mark.skipif(not (numpy or CAN_PACK_HALF_FLOAT), reason="dependency") @given(st.floats(width=32, allow_infinity=False)) def test_float16_can_exclude_infinity(x): assert not math.isinf(x) @pytest.mark.parametrize( "kwargs", [ dict(min_value=10 ** 5, width=16), dict(max_value=10 ** 5, width=16), dict(min_value=10 ** 40, width=32), dict(max_value=10 ** 40, width=32), dict(min_value=10 ** 400, width=64), dict(max_value=10 ** 400, width=64), dict(min_value=10 ** 400), dict(max_value=10 ** 400), ], ) def test_out_of_range(kwargs): if kwargs.get("width") == 16 and not (CAN_PACK_HALF_FLOAT or numpy): pytest.skip() with pytest.raises(OverflowError): st.floats(**kwargs).validate() def test_invalidargument_iff_half_float_unsupported(): if numpy is None and not CAN_PACK_HALF_FLOAT: with pytest.raises(InvalidArgument): st.floats(width=16).validate() else: st.floats(width=16).validate() def test_disallowed_width(): with pytest.raises(InvalidArgument): st.floats(width=128).validate() def test_no_single_floats_in_range(): low = 2.0 ** 25 + 1 high = low + 2 st.floats(low, high).validate() # Note: OK for 64bit floats with pytest.raises(InvalidArgument): """Unrepresentable bounds are deprecated; but we're not testing that here.""" with warnings.catch_warnings(): warnings.simplefilter("ignore") st.floats(low, high, width=32).validate() # If the floats() strategy adds random floats to a value as large as 10^304 # without handling overflow, we are very likely to generate infinity. 
@given(st.floats(min_value=1e304, allow_infinity=False)) def test_finite_min_bound_does_not_overflow(x): assert not math.isinf(x) @given(st.floats(max_value=-1e304, allow_infinity=False)) def test_finite_max_bound_does_not_overflow(x): assert not math.isinf(x) @given(st.floats(0, 1, exclude_min=True, exclude_max=True)) def test_can_exclude_endpoints(x): assert 0 < x < 1 @given(st.floats(float("-inf"), -1e307, exclude_min=True)) def test_can_exclude_neg_infinite_endpoint(x): assert not math.isinf(x) @given(st.floats(1e307, float("inf"), exclude_max=True)) def test_can_exclude_pos_infinite_endpoint(x): assert not math.isinf(x) def test_exclude_infinite_endpoint_is_invalid(): with pytest.raises(InvalidArgument): st.floats(min_value=float("inf"), exclude_min=True).validate() with pytest.raises(InvalidArgument): st.floats(max_value=float("-inf"), exclude_max=True).validate() @pytest.mark.parametrize("lo,hi", [(True, False), (False, True), (True, True)]) @given(bound=st.floats(allow_nan=False, allow_infinity=False).filter(bool)) def test_exclude_entire_interval(lo, hi, bound): with pytest.raises(InvalidArgument, match="exclude_min=.+ and exclude_max="): st.floats(bound, bound, exclude_min=lo, exclude_max=hi).validate() def test_exclude_zero_interval(): st.floats(-0.0, 0.0).validate() st.floats(-0.0, 0.0, exclude_min=True).validate() st.floats(-0.0, 0.0, exclude_max=True).validate() @checks_deprecated_behaviour def test_inverse_zero_interval_is_deprecated(): st.floats(0.0, -0.0).validate() st.floats(-0.0, 0.0, exclude_min=True, exclude_max=True).validate() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_float_utils.py000066400000000000000000000024071354103617500314650ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import math import pytest from hypothesis.internal.floats import count_between_floats, next_down, next_up def test_can_handle_straddling_zero(): assert count_between_floats(-0.0, 0.0) == 2 @pytest.mark.parametrize( "func,val", [ (next_up, float("nan")), (next_up, float("inf")), (next_up, -0.0), (next_down, float("nan")), (next_down, float("-inf")), (next_down, 0.0), ], ) def test_next_float_equal(func, val): if math.isnan(val): assert math.isnan(func(val)) else: assert func(val) == val hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_functions.py000066400000000000000000000053411354103617500311500ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import given from hypothesis.errors import InvalidArgument, InvalidState from hypothesis.internal.compat import getfullargspec from hypothesis.strategies import booleans, functions def func_a(): pass @given(functions(func_a, booleans())) def test_functions_no_args(f): assert f.__name__ == "func_a" assert f is not func_a assert isinstance(f(), bool) def func_b(a, b, c): pass @given(functions(func_b, booleans())) def test_functions_with_args(f): assert f.__name__ == "func_b" assert f is not func_b with pytest.raises(TypeError): f() assert isinstance(f(1, 2, 3), bool) def func_c(**kwargs): pass @given(functions(func_c, booleans())) def test_functions_kw_args(f): assert f.__name__ == "func_c" assert f is not func_c with pytest.raises(TypeError): f(1, 2, 3) assert isinstance(f(a=1, b=2, c=3), bool) @given(functions(lambda: None, booleans())) def test_functions_argless_lambda(f): assert f.__name__ == "" with pytest.raises(TypeError): f(1) assert isinstance(f(), bool) @given(functions(lambda a: None, booleans())) def test_functions_lambda_with_arg(f): assert f.__name__ == "" with pytest.raises(TypeError): f() assert isinstance(f(1), bool) @pytest.mark.parametrize("like,returns", [(None, booleans()), (lambda: None, None)]) def test_invalid_arguments(like, returns): with pytest.raises(InvalidArgument): functions(like, returns).example() def test_functions_valid_within_given_invalid_outside(): cache = [None] @given(functions()) def t(f): assert f() is None cache[0] = f t() with pytest.raises(InvalidState): cache[0]() def test_can_call_default_like_arg(): # This test is somewhat silly, but coverage complains about the uncovered # branch for calling it otherwise and alternative workarounds are worse. 
like, returns = getfullargspec(functions).defaults assert like() is None assert returns.example() is None hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_given_error_conditions.py000066400000000000000000000036771354103617500337240ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import assume, given, infer, reject, settings from hypothesis.errors import InvalidArgument, Unsatisfiable from hypothesis.strategies import booleans, integers from tests.common.utils import fails_with def test_raises_unsatisfiable_if_all_false_in_finite_set(): @given(booleans()) def test_assume_false(x): reject() with pytest.raises(Unsatisfiable): test_assume_false() def test_does_not_raise_unsatisfiable_if_some_false_in_finite_set(): @given(booleans()) def test_assume_x(x): assume(x) test_assume_x() def test_error_if_has_no_hints(): @given(a=infer) def inner(a): pass with pytest.raises(InvalidArgument): inner() def test_error_if_infer_is_posarg(): @given(infer) def inner(ex): pass with pytest.raises(InvalidArgument): inner() def test_given_twice_is_an_error(): @settings(deadline=None) @given(booleans()) @given(integers()) def inner(a, b): pass with pytest.raises(InvalidArgument): inner() @fails_with(InvalidArgument) def test_given_is_not_a_class_decorator(): 
@given(integers()) class test_given_is_not_a_class_decorator: def __init__(self, i): pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_health_checks.py000066400000000000000000000116411354103617500317250ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import time import pytest from pytest import raises import hypothesis.strategies as st from hypothesis import HealthCheck, given, settings from hypothesis.control import assume from hypothesis.errors import FailedHealthCheck, InvalidArgument from hypothesis.internal.compat import int_from_bytes from hypothesis.searchstrategy.strategies import SearchStrategy from tests.common.utils import checks_deprecated_behaviour, no_shrink def test_slow_generation_fails_a_health_check(): @given(st.integers().map(lambda x: time.sleep(0.2))) def test(x): pass with raises(FailedHealthCheck): test() def test_slow_generation_inline_fails_a_health_check(): @settings(deadline=None) @given(st.data()) def test(data): data.draw(st.integers().map(lambda x: time.sleep(0.2))) with raises(FailedHealthCheck): test() def test_default_health_check_can_weaken_specific(): import random @settings(suppress_health_check=HealthCheck.all()) @given(st.lists(st.integers(), min_size=1)) def test(x): random.choice(x) test() def 
test_suppressing_filtering_health_check(): forbidden = set() def unhealthy_filter(x): if len(forbidden) < 200: forbidden.add(x) return x not in forbidden @given(st.integers().filter(unhealthy_filter)) def test1(x): raise ValueError() with raises(FailedHealthCheck): test1() forbidden = set() @settings(suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.too_slow]) @given(st.integers().filter(unhealthy_filter)) def test2(x): raise ValueError() with raises(ValueError): test2() def test_filtering_everything_fails_a_health_check(): @given(st.integers().filter(lambda x: False)) @settings(database=None) def test(x): pass with raises(FailedHealthCheck) as e: test() assert "filter" in e.value.args[0] class fails_regularly(SearchStrategy): def do_draw(self, data): b = int_from_bytes(data.draw_bytes(2)) assume(b == 3) print("ohai") def test_filtering_most_things_fails_a_health_check(): @given(fails_regularly()) @settings(database=None, phases=no_shrink) def test(x): pass with raises(FailedHealthCheck) as e: test() assert "filter" in e.value.args[0] def test_large_data_will_fail_a_health_check(): @given(st.none() | st.binary(min_size=10 ** 5)) @settings(database=None) def test(x): pass with raises(FailedHealthCheck) as e: test() assert "allowable size" in e.value.args[0] def test_returning_non_none_is_forbidden(): @given(st.integers()) def a(x): return 1 with raises(FailedHealthCheck): a() def test_the_slow_test_health_check_can_be_disabled(): @given(st.integers()) @settings(deadline=None) def a(x): time.sleep(1000) a() def test_the_slow_test_health_only_runs_if_health_checks_are_on(): @given(st.integers()) @settings(suppress_health_check=HealthCheck.all(), deadline=None) def a(x): time.sleep(1000) a() def test_returning_non_none_does_not_fail_if_health_check_disabled(): @given(st.integers()) @settings(suppress_health_check=HealthCheck.all()) def a(x): return 1 a() def test_large_base_example_fails_health_check(): @given(st.binary(min_size=7000, max_size=7000)) def 
test(b): pass with pytest.raises(FailedHealthCheck) as exc: test() assert exc.value.health_check == HealthCheck.large_base_example def test_example_that_shrinks_to_overrun_fails_health_check(): @given(st.binary(min_size=9000, max_size=9000) | st.none()) def test(b): pass with pytest.raises(FailedHealthCheck) as exc: test() assert exc.value.health_check == HealthCheck.large_base_example def test_it_is_an_error_to_suppress_non_iterables(): with raises(InvalidArgument): settings(suppress_health_check=1) @checks_deprecated_behaviour def test_hung_test_is_deprecated(): settings(suppress_health_check=[HealthCheck.hung_test]) def test_it_is_an_error_to_suppress_non_healthchecks(): with raises(InvalidArgument): settings(suppress_health_check=[1]) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_internal_helpers.py000066400000000000000000000016161354103617500324770ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis.internal.floats import sign def test_sign_gives_good_type_error(): x = "foo" with pytest.raises(TypeError) as e: sign(x) assert repr(x) in e.value.args[0] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_intervalset.py000066400000000000000000000057641354103617500315110ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import assume, example, given from hypothesis.internal.charmap import _subtract_intervals from hypothesis.internal.intervalsets import IntervalSet def build_intervals(ls): ls.sort() result = [] for u, l in ls: v = u + l if result: a, b = result[-1] if u <= b + 1: result[-1] = (a, v) continue result.append((u, v)) return result IntervalLists = st.builds( build_intervals, st.lists(st.tuples(st.integers(0, 200), st.integers(0, 20))) ) Intervals = st.builds(IntervalSet, IntervalLists) @given(Intervals) def test_intervals_are_equivalent_to_their_lists(intervals): ls = list(intervals) assert len(ls) == len(intervals) for i in range(len(ls)): assert ls[i] == intervals[i] for i in range(1, len(ls) - 1): assert ls[-i] == intervals[-i] @given(Intervals) def test_intervals_match_indexes(intervals): ls = list(intervals) for v in ls: assert ls.index(v) == intervals.index(v) @example(intervals=IntervalSet(()), v=0) @given(Intervals, st.integers()) def test_error_for_index_of_not_present_value(intervals, v): assume(v not in intervals) with pytest.raises(ValueError): intervals.index(v) def test_validates_index(): with pytest.raises(IndexError): IntervalSet([])[1] with pytest.raises(IndexError): IntervalSet([[1, 10]])[11] with pytest.raises(IndexError): IntervalSet([[1, 10]])[-11] def test_index_above_is_index_if_present(): assert IntervalSet([[1, 10]]).index_above(1) == 0 assert IntervalSet([[1, 10]]).index_above(2) == 1 def test_index_above_is_length_if_higher(): assert IntervalSet([[1, 10]]).index_above(100) == 10 def intervals_to_set(ints): return set(IntervalSet(ints)) @example(x=[(0, 1), (3, 3)], y=[(1, 3)]) @example(x=[(0, 1)], y=[(0, 0), (1, 1)]) @example(x=[(0, 1)], y=[(1, 1)]) @given(IntervalLists, IntervalLists) def test_subtraction_of_intervals(x, y): xs = intervals_to_set(x) ys = intervals_to_set(x) assume(not 
xs.isdisjoint(ys)) z = _subtract_intervals(x, y) assert z == tuple(sorted(z)) for a, b in z: assert a <= b assert intervals_to_set(z) == intervals_to_set(x) - intervals_to_set(y) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_lambda_formatting.py000066400000000000000000000115661354103617500326200ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.internal.reflection import get_pretty_function_description def test_bracket_whitespace_is_striped(): assert get_pretty_function_description(lambda x: (x + 1)) == "lambda x: (x + 1)" def test_no_whitespace_before_colon_with_no_args(): assert get_pretty_function_description(eval("lambda: None")) == "lambda: " def test_can_have_unicode_in_lambda_sources(): t = lambda x: "é" not in x assert get_pretty_function_description(t) == ('lambda x: "é" not in x') # fmt: off ordered_pair = ( lambda right: [].map( lambda length: ())) # fmt: on def test_can_get_descriptions_of_nested_lambdas_with_different_names(): assert ( get_pretty_function_description(ordered_pair) == "lambda right: [].map(lambda length: ())" ) def test_does_not_error_on_unparsable_source(): # fmt: off t = [ lambda x: \ # This will break ast.parse, but the brackets are needed for the real # parser to accept this lambda x][0] # fmt: on assert 
get_pretty_function_description(t) == "lambda x: " def test_source_of_lambda_is_pretty(): assert get_pretty_function_description(lambda x: True) == "lambda x: True" def test_variable_names_are_not_pretty(): t = lambda x: True assert get_pretty_function_description(t) == "lambda x: True" def test_does_not_error_on_dynamically_defined_functions(): x = eval("lambda t: 1") get_pretty_function_description(x) def test_collapses_whitespace_nicely(): # fmt: off t = ( lambda x, y: 1 ) # fmt: on assert get_pretty_function_description(t) == "lambda x, y: 1" def test_is_not_confused_by_tuples(): p = (lambda x: x > 1, 2)[0] assert get_pretty_function_description(p) == "lambda x: x > 1" def test_strips_comments_from_the_end(): t = lambda x: 1 # A lambda comment assert get_pretty_function_description(t) == "lambda x: 1" def test_does_not_strip_hashes_within_a_string(): t = lambda x: "#" assert get_pretty_function_description(t) == 'lambda x: "#"' def test_can_distinguish_between_two_lambdas_with_different_args(): a, b = (lambda x: 1, lambda y: 2) assert get_pretty_function_description(a) == "lambda x: 1" assert get_pretty_function_description(b) == "lambda y: 2" def test_does_not_error_if_it_cannot_distinguish_between_two_lambdas(): a, b = (lambda x: 1, lambda x: 2) assert "lambda x:" in get_pretty_function_description(a) assert "lambda x:" in get_pretty_function_description(b) def test_lambda_source_break_after_def_with_brackets(): # fmt: off f = (lambda n: 'aaa') # fmt: on source = get_pretty_function_description(f) assert source == "lambda n: 'aaa'" def test_lambda_source_break_after_def_with_line_continuation(): # fmt: off f = lambda n:\ 'aaa' # fmt: on source = get_pretty_function_description(f) assert source == "lambda n: 'aaa'" def arg_decorator(*s): def accept(f): return s return accept @arg_decorator(lambda x: x + 1) def plus_one(): pass @arg_decorator(lambda x: x + 1, lambda y: y * 2) def two_decorators(): pass def test_can_extract_lambda_repr_in_a_decorator(): assert 
get_pretty_function_description(plus_one[0]) == "lambda x: x + 1" def test_can_extract_two_lambdas_from_a_decorator_if_args_differ(): a, b = two_decorators assert get_pretty_function_description(a) == "lambda x: x + 1" assert get_pretty_function_description(b) == "lambda y: y * 2" @arg_decorator(lambda x: x + 1) def decorator_with_space(): pass def test_can_extract_lambda_repr_in_a_decorator_with_spaces(): assert get_pretty_function_description(decorator_with_space[0]) == "lambda x: x + 1" @arg_decorator(lambda: ()) def to_brackets(): pass def test_can_handle_brackets_in_decorator_argument(): assert get_pretty_function_description(to_brackets[0]) == "lambda: ()" def identity(x): return x @arg_decorator(identity(lambda x: x + 1)) def decorator_with_wrapper(): pass def test_can_handle_nested_lambda_in_decorator_argument(): assert ( get_pretty_function_description(decorator_with_wrapper[0]) == "lambda x: x + 1" ) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_map.py000066400000000000000000000020151354103617500277100ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import assume, given, strategies as st from tests.common.debug import assert_no_examples @given(st.integers().map(lambda x: assume(x % 3 != 0) and x)) def test_can_assume_in_map(x): assert x % 3 != 0 def test_assume_in_just_raises_immediately(): assert_no_examples(st.just(1).map(lambda x: assume(x == 2))) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_nothing.py000066400000000000000000000041271354103617500306070ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import given, strategies as st from hypothesis.errors import InvalidArgument from tests.common.debug import assert_no_examples, minimal def test_resampling(): x = minimal( st.lists(st.integers(), min_size=1).flatmap( lambda x: st.lists(st.sampled_from(x)) ), lambda x: len(x) >= 10 and len(set(x)) == 1, ) assert x == [0] * 10 @given(st.lists(st.nothing())) def test_list_of_nothing(xs): assert xs == [] @given(st.sets(st.nothing())) def test_set_of_nothing(xs): assert xs == set() def test_validates_min_size(): with pytest.raises(InvalidArgument): st.lists(st.nothing(), min_size=1).validate() def test_function_composition(): assert st.nothing().map(lambda x: "hi").is_empty assert st.nothing().filter(lambda x: True).is_empty assert st.nothing().flatmap(lambda x: st.integers()).is_empty def test_tuples_detect_empty_elements(): assert st.tuples(st.nothing()).is_empty def test_fixed_dictionaries_detect_empty_values(): assert st.fixed_dictionaries({"a": st.nothing()}).is_empty def test_no_examples(): assert_no_examples(st.nothing()) @pytest.mark.parametrize( "s", [ st.nothing(), st.nothing().map(lambda x: x), st.nothing().filter(lambda x: True), st.nothing().flatmap(lambda x: st.integers()), ], ) def test_empty(s): assert s.is_empty hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_numerics.py000066400000000000000000000131561354103617500307700ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import decimal from math import copysign import pytest from hypothesis import assume, given, reject, settings from hypothesis.errors import HypothesisDeprecationWarning, InvalidArgument from hypothesis.internal.floats import next_down from hypothesis.strategies import ( booleans, data, decimals, floats, fractions, integers, none, tuples, ) from tests.common.debug import find_any @given(data()) def test_fuzz_floats_bounds(data): bound = none() | floats(allow_nan=False) low, high = data.draw(tuples(bound, bound), label="low, high") if low is not None and high is not None and low > high: low, high = high, low exmin = ( low is not None and low != float("inf") and data.draw(booleans(), label="exclude_min") ) exmax = ( high is not None and high != float("-inf") and data.draw(booleans(), label="exclude_max") ) try: val = data.draw( floats(low, high, exclude_min=exmin, exclude_max=exmax), label="value" ) assume(val) # positive/negative zero is an issue except (InvalidArgument, HypothesisDeprecationWarning): assert ( (exmin and exmax and low == next_down(high)) or (low == high and (exmin or exmax)) or ( low == high == 0 and copysign(1.0, low) == 1 and copysign(1.0, high) == -1 ) ) reject() # no floats in required range if low is not None: assert low <= val if high is not None: assert val <= high if exmin: assert low != val if exmax: assert high != val @given(data()) def test_fuzz_fractions_bounds(data): denom = data.draw(none() | integers(1, 100), label="denominator") fracs = none() | fractions(max_denominator=denom) low, high = data.draw(tuples(fracs, fracs), label="low, high") if low is not None and high is not None and low > high: low, high = high, low try: val = data.draw(fractions(low, high, 
denom), label="value") except InvalidArgument: reject() # fractions too close for given max_denominator if low is not None: assert low <= val if high is not None: assert val <= high if denom is not None: assert 1 <= val.denominator <= denom @given(data()) def test_fuzz_decimals_bounds(data): places = data.draw(none() | integers(0, 20), label="places") finite_decs = ( decimals(allow_nan=False, allow_infinity=False, places=places) | none() ) low, high = data.draw(tuples(finite_decs, finite_decs), label="low, high") if low is not None and high is not None and low > high: low, high = high, low ctx = decimal.Context(prec=data.draw(integers(1, 100), label="precision")) try: with decimal.localcontext(ctx): strat = decimals( low, high, allow_nan=False, allow_infinity=False, places=places ) val = data.draw(strat, label="value") except InvalidArgument: reject() # decimals too close for given places if low is not None: assert low <= val if high is not None: assert val <= high if places is not None: assert val.as_tuple().exponent == -places def test_all_decimals_can_be_exact_floats(): find_any( decimals(), lambda x: assume(x.is_finite()) and decimal.Decimal(float(x)) == x ) @given(fractions(), fractions(), fractions()) def test_fraction_addition_is_well_behaved(x, y, z): assert x + y + z == y + x + z def test_decimals_include_nan(): find_any(decimals(), lambda x: x.is_nan()) def test_decimals_include_inf(): find_any(decimals(), lambda x: x.is_infinite(), settings(max_examples=10 ** 6)) @given(decimals(allow_nan=False)) def test_decimals_can_disallow_nan(x): assert not x.is_nan() @given(decimals(allow_infinity=False)) def test_decimals_can_disallow_inf(x): assert not x.is_infinite() @pytest.mark.parametrize("places", range(10)) def test_decimals_have_correct_places(places): @given(decimals(0, 10, allow_nan=False, places=places)) def inner_tst(n): assert n.as_tuple().exponent == -places inner_tst() @given(decimals(min_value="0.1", max_value="0.2", allow_nan=False, places=1)) def 
test_works_with_few_values(dec): assert dec in (decimal.Decimal("0.1"), decimal.Decimal("0.2")) @given(decimals(places=3, allow_nan=False, allow_infinity=False)) def test_issue_725_regression(x): pass @given(decimals(min_value="0.1", max_value="0.3")) def test_issue_739_regression(x): pass def test_consistent_decimal_error(): bad = "invalid argument to Decimal" with pytest.raises(InvalidArgument) as excinfo: decimals(bad).example() with pytest.raises(InvalidArgument) as excinfo2: with decimal.localcontext(decimal.Context(traps=[])): decimals(bad).example() assert str(excinfo.value) == str(excinfo2.value) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_one_of.py000066400000000000000000000021311354103617500303770ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.strategies as st from hypothesis import given from tests.common.debug import assert_no_examples def test_one_of_empty(): e = st.one_of() assert e.is_empty assert_no_examples(e) @given(st.one_of(st.integers().filter(bool))) def test_one_of_filtered(i): assert bool(i) @given(st.one_of(st.just(100).flatmap(st.integers))) def test_one_of_flatmapped(i): assert i >= 100 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_permutations.py000066400000000000000000000025761354103617500317010ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given from hypothesis.errors import InvalidArgument from hypothesis.strategies import permutations from tests.common.debug import minimal from tests.common.utils import fails_with def test_can_find_non_trivial_permutation(): x = minimal(permutations(list(range(5))), lambda x: x[0] != 0) assert x == [1, 0, 2, 3, 4] @given(permutations(list(u"abcd"))) def test_permutation_values_are_permutations(perm): assert len(perm) == 4 assert set(perm) == set(u"abcd") @given(permutations([])) def test_empty_permutations_are_empty(xs): assert xs == [] @fails_with(InvalidArgument) def test_cannot_permute_non_sequence_types(): permutations(set()).example() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_phases.py000066400000000000000000000055261354103617500304300ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import Phase, example, given, settings from hypothesis.database import ExampleDatabase, InMemoryExampleDatabase from hypothesis.errors import InvalidArgument from tests.common.utils import checks_deprecated_behaviour @example(11) @settings(phases=(Phase.explicit,)) @given(st.integers()) def test_only_runs_explicit_examples(i): assert i == 11 @example(u"hello world") @settings(phases=(Phase.reuse, Phase.generate, Phase.shrink)) @given(st.booleans()) def test_does_not_use_explicit_examples(i): assert isinstance(i, bool) @settings(phases=(Phase.reuse, Phase.shrink)) @given(st.booleans()) def test_this_would_fail_if_you_ran_it(b): assert False @pytest.mark.parametrize( "arg,expected", [ (tuple(Phase)[::-1], tuple(Phase)), ([Phase.explicit, Phase.explicit], (Phase.explicit,)), ], ) def test_sorts_and_dedupes_phases(arg, expected): assert settings(phases=arg).phases == expected def test_phases_default_to_all(): assert settings().phases == tuple(Phase) @checks_deprecated_behaviour def test_phases_none_equals_all(): assert settings(phases=None).phases == tuple(Phase) def test_does_not_reuse_saved_examples_if_reuse_not_in_phases(): class BadDatabase(ExampleDatabase): def save(self, key, value): pass def delete(self, key, value): pass def fetch(self, key): raise ValueError() def close(self): pass @settings(database=BadDatabase(), phases=(Phase.generate,)) @given(st.integers()) def test_usage(i): pass test_usage() def test_will_save_when_reuse_not_in_phases(): database = InMemoryExampleDatabase() assert not database.data @settings(database=database, phases=(Phase.generate,)) @given(st.integers()) def test_usage(i): raise ValueError() with pytest.raises(ValueError): test_usage() saved, = [v for k, v in database.data.items() if b"coverage" not in k] assert len(saved) == 1 def test_rejects_non_phases(): with 
pytest.raises(InvalidArgument): settings(phases=["cabbage"]) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_pretty.py000066400000000000000000000454771354103617500305050ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER # coding: utf-8 """This file originates in the IPython project and is made use of under the following licensing terms: The IPython licensing terms IPython is licensed under the terms of the Modified BSD License (also known as New or Revised or 3-Clause BSD), as follows: Copyright (c) 2008-2014, IPython Development Team Copyright (c) 2001-2007, Fernando Perez Copyright (c) 2001, Janko Hauser Copyright (c) 2001, Nathaniel Gray All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the IPython Development Team nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from __future__ import absolute_import, division, print_function import re from collections import defaultdict, deque import pytest from hypothesis.internal.compat import PY3, PYPY, Counter, OrderedDict, a_good_encoding from hypothesis.vendor import pretty from tests.common.utils import capture_out py2_only = pytest.mark.skipif(PY3, reason="This test only runs on python 2") if PY3: from io import StringIO def unicode_to_str(x, encoding=None): return x else: from StringIO import StringIO def unicode_to_str(x, encoding=None): return x.encode(encoding or a_good_encoding()) def assert_equal(x, y): assert x == y def assert_true(x): assert x def assert_in(x, xs): assert x in xs def skip_without(mod): try: __import__(mod) return lambda f: f except ImportError: return pytest.mark.skipif(True, reason="Missing %s" % (mod,)) assert_raises = pytest.raises class MyList(object): def __init__(self, content): self.content = content def _repr_pretty_(self, p, cycle): if cycle: p.text("MyList(...)") else: with p.group(3, "MyList(", ")"): for (i, child) in enumerate(self.content): if i: p.text(",") p.breakable() else: p.breakable("") p.pretty(child) class MyDict(dict): def _repr_pretty_(self, p, cycle): p.text("MyDict(...)") class 
MyObj(object): def somemethod(self): pass class Dummy1(object): def _repr_pretty_(self, p, cycle): p.text("Dummy1(...)") class Dummy2(Dummy1): _repr_pretty_ = None class NoModule(object): pass NoModule.__module__ = None class Breaking(object): def _repr_pretty_(self, p, cycle): with p.group(4, "TG: ", ":"): p.text("Breaking(") p.break_() p.text(")") class BreakingRepr(object): def __repr__(self): return "Breaking(\n)" class BreakingReprParent(object): def _repr_pretty_(self, p, cycle): with p.group(4, "TG: ", ":"): p.pretty(BreakingRepr()) class BadRepr(object): def __repr__(self): return 1 / 0 def test_list(): assert pretty.pretty([]) == "[]" assert pretty.pretty([1]) == "[1]" def test_dict(): assert pretty.pretty({}) == "{}" assert pretty.pretty({1: 1}) == "{1: 1}" def test_tuple(): assert pretty.pretty(()) == "()" assert pretty.pretty((1,)) == "(1,)" assert pretty.pretty((1, 2)) == "(1, 2)" class ReprDict(dict): def __repr__(self): return "hi" def test_dict_with_custom_repr(): assert pretty.pretty(ReprDict()) == "hi" class ReprList(list): def __repr__(self): return "bye" class ReprSet(set): def __repr__(self): return "cat" def test_set_with_custom_repr(): assert pretty.pretty(ReprSet()) == "cat" def test_list_with_custom_repr(): assert pretty.pretty(ReprList()) == "bye" def test_indentation(): """Test correct indentation in groups.""" count = 40 gotoutput = pretty.pretty(MyList(range(count))) expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")" assert_equal(gotoutput, expectedoutput) def test_dispatch(): """Test correct dispatching: The _repr_pretty_ method for MyDict must be found before the registered printer for dict.""" gotoutput = pretty.pretty(MyDict()) expectedoutput = "MyDict(...)" assert_equal(gotoutput, expectedoutput) def test_callability_checking(): """Test that the _repr_pretty_ method is tested for callability and skipped if not.""" gotoutput = pretty.pretty(Dummy2()) expectedoutput = "Dummy1(...)" 
assert_equal(gotoutput, expectedoutput) def test_sets(): """Test that set and frozenset use Python 3 formatting.""" objects = [ set(), frozenset(), {1}, frozenset([1]), {1, 2}, frozenset([1, 2]), {-1, -2, -3}, ] expected = [ "set()", "frozenset()", "{1}", "frozenset({1})", "{1, 2}", "frozenset({1, 2})", "{-3, -2, -1}", ] for obj, expected_output in zip(objects, expected): got_output = pretty.pretty(obj) assert_equal(got_output, expected_output) def test_unsortable_set(): xs = {1, 2, 3, "foo", "bar", "baz", object()} p = pretty.pretty(xs) for x in xs: assert pretty.pretty(x) in p def test_unsortable_dict(): xs = {k: 1 for k in [1, 2, 3, "foo", "bar", "baz", object()]} p = pretty.pretty(xs) for x in xs: assert pretty.pretty(x) in p @skip_without("xxlimited") def test_pprint_heap_allocated_type(): """Test that pprint works for heap allocated types.""" import xxlimited output = pretty.pretty(xxlimited.Null) assert_equal(output, "xxlimited.Null") def test_pprint_nomod(): """Test that pprint works for classes with no __module__.""" output = pretty.pretty(NoModule) assert_equal(output, "NoModule") def test_pprint_break(): """Test that p.break_ produces expected output.""" output = pretty.pretty(Breaking()) expected = "TG: Breaking(\n ):" assert_equal(output, expected) def test_pprint_break_repr(): """Test that p.break_ is used in repr.""" output = pretty.pretty(BreakingReprParent()) expected = "TG: Breaking(\n ):" assert_equal(output, expected) def test_bad_repr(): """Don't catch bad repr errors.""" with assert_raises(ZeroDivisionError): pretty.pretty(BadRepr()) class BadException(Exception): def __str__(self): return -1 class ReallyBadRepr(object): __module__ = 1 @property def __class__(self): raise ValueError("I am horrible") def __repr__(self): raise BadException() def test_really_bad_repr(): with assert_raises(BadException): pretty.pretty(ReallyBadRepr()) class SA(object): pass class SB(SA): pass try: super(SA).__self__ def test_super_repr(): output = 
pretty.pretty(super(SA)) assert_in("SA", output) sb = SB() output = pretty.pretty(super(SA, sb)) assert_in("SA", output) except AttributeError: def test_super_repr(): pretty.pretty(super(SA)) sb = SB() pretty.pretty(super(SA, sb)) def test_long_list(): lis = list(range(10000)) p = pretty.pretty(lis) last2 = p.rsplit("\n", 2)[-2:] assert_equal(last2, [" 999,", " ...]"]) def test_long_set(): s = set(range(10000)) p = pretty.pretty(s) last2 = p.rsplit("\n", 2)[-2:] assert_equal(last2, [" 999,", " ...}"]) def test_long_tuple(): tup = tuple(range(10000)) p = pretty.pretty(tup) last2 = p.rsplit("\n", 2)[-2:] assert_equal(last2, [" 999,", " ...)"]) def test_long_dict(): d = {n: n for n in range(10000)} p = pretty.pretty(d) last2 = p.rsplit("\n", 2)[-2:] assert_equal(last2, [" 999: 999,", " ...}"]) def test_unbound_method(): output = pretty.pretty(MyObj.somemethod) assert_in("MyObj.somemethod", output) class MetaClass(type): def __new__(cls, name): return type.__new__(cls, name, (object,), {"name": name}) def __repr__(self): return "[CUSTOM REPR FOR CLASS %s]" % self.name ClassWithMeta = MetaClass("ClassWithMeta") def test_metaclass_repr(): output = pretty.pretty(ClassWithMeta) assert_equal(output, "[CUSTOM REPR FOR CLASS ClassWithMeta]") def test_unicode_repr(): u = u"üniçodé" ustr = unicode_to_str(u) class C(object): def __repr__(self): return ustr c = C() p = pretty.pretty(c) assert_equal(p, u) p = pretty.pretty([c]) assert_equal(p, u"[%s]" % u) def test_basic_class(): def type_pprint_wrapper(obj, p, cycle): if obj is MyObj: type_pprint_wrapper.called = True return pretty._type_pprint(obj, p, cycle) type_pprint_wrapper.called = False stream = StringIO() printer = pretty.RepresentationPrinter(stream) printer.type_pprinters[type] = type_pprint_wrapper printer.pretty(MyObj) printer.flush() output = stream.getvalue() assert_equal(output, "%s.MyObj" % __name__) assert_true(type_pprint_wrapper.called) # This is only run on Python 2 because in Python 3 the language prevents 
you # from setting a non-unicode value for __qualname__ on a metaclass, and it # doesn't respect the descriptor protocol if you subclass unicode and implement # __get__. @py2_only def test_fallback_to__name__on_type(): # Test that we correctly repr types that have non-string values for # __qualname__ by falling back to __name__ class Type(object): __qualname__ = 5 # Test repring of the type. stream = StringIO() printer = pretty.RepresentationPrinter(stream) printer.pretty(Type) printer.flush() output = stream.getvalue() # If __qualname__ is malformed, we should fall back to __name__. expected = ".".join([__name__, Type.__name__]) assert_equal(output, expected) # Clear stream buffer. stream.buf = "" # Test repring of an instance of the type. instance = Type() printer.pretty(instance) printer.flush() output = stream.getvalue() # Should look like: # prefix = "<" + ".".join([__name__, Type.__name__]) + " at 0x" assert_true(output.startswith(prefix)) @py2_only def test_fail_gracefully_on_bogus__qualname__and__name__(): # Test that we correctly repr types that have non-string values for both # __qualname__ and __name__ class Meta(type): __name__ = 5 class Type(object): __metaclass__ = Meta __qualname__ = 5 stream = StringIO() printer = pretty.RepresentationPrinter(stream) printer.pretty(Type) printer.flush() output = stream.getvalue() # If we can't find __name__ or __qualname__ just use a sentinel string. expected = ".".join([__name__, ""]) assert_equal(output, expected) # Clear stream buffer. stream.buf = "" # Test repring of an instance of the type. instance = Type() printer.pretty(instance) printer.flush() output = stream.getvalue() # Should look like: # at 0x7f7658ae07d0> prefix = "<" + ".".join([__name__, ""]) + " at 0x" assert_true(output.startswith(prefix)) def test_collections_defaultdict(): # Create defaultdicts with cycles a = defaultdict() a.default_factory = a b = defaultdict(list) b["key"] = b # Dictionary order cannot be relied on, test against single keys. 
cases = [ (defaultdict(list), "defaultdict(list, {})"), ( defaultdict(list, {"key": "-" * 50}), "defaultdict(list,\n" " {'key': '-----------------------------------------" "---------'})", ), (a, "defaultdict(defaultdict(...), {})"), (b, "defaultdict(list, {'key': defaultdict(...)})"), ] for obj, expected in cases: assert_equal(pretty.pretty(obj), expected) @pytest.mark.skipif(PY3 and PYPY, reason="slightly different on PyPy3") def test_collections_ordereddict(): # Create OrderedDict with cycle a = OrderedDict() a["key"] = a cases = [ (OrderedDict(), "OrderedDict()"), ( OrderedDict((i, i) for i in range(1000, 1010)), "OrderedDict([(1000, 1000),\n" " (1001, 1001),\n" " (1002, 1002),\n" " (1003, 1003),\n" " (1004, 1004),\n" " (1005, 1005),\n" " (1006, 1006),\n" " (1007, 1007),\n" " (1008, 1008),\n" " (1009, 1009)])", ), (a, "OrderedDict([('key', OrderedDict(...))])"), ] for obj, expected in cases: assert_equal(pretty.pretty(obj), expected) def test_collections_deque(): # Create deque with cycle a = deque() a.append(a) cases = [ (deque(), "deque([])"), ( deque(i for i in range(1000, 1020)), "deque([1000,\n" " 1001,\n" " 1002,\n" " 1003,\n" " 1004,\n" " 1005,\n" " 1006,\n" " 1007,\n" " 1008,\n" " 1009,\n" " 1010,\n" " 1011,\n" " 1012,\n" " 1013,\n" " 1014,\n" " 1015,\n" " 1016,\n" " 1017,\n" " 1018,\n" " 1019])", ), (a, "deque([deque(...)])"), ] for obj, expected in cases: assert_equal(pretty.pretty(obj), expected) def test_collections_counter(): class MyCounter(Counter): pass cases = [ (Counter(), "Counter()"), (Counter(a=1), "Counter({'a': 1})"), (MyCounter(a=1), "MyCounter({'a': 1})"), ] for obj, expected in cases: assert_equal(pretty.pretty(obj), expected) def test_cyclic_list(): x = [] x.append(x) assert pretty.pretty(x) == "[[...]]" def test_cyclic_dequeue(): x = deque() x.append(x) assert pretty.pretty(x) == "deque([deque(...)])" class HashItAnyway(object): def __init__(self, value): self.value = value def __hash__(self): return 0 def __eq__(self, other): return 
isinstance(other, HashItAnyway) and self.value == other.value def __ne__(self, other): return not self.__eq__(other) def _repr_pretty_(self, pretty, cycle): pretty.pretty(self.value) def test_cyclic_counter(): c = Counter() k = HashItAnyway(c) c[k] = 1 assert pretty.pretty(c) == "Counter({Counter(...): 1})" def test_cyclic_dict(): x = {} k = HashItAnyway(x) x[k] = x assert pretty.pretty(x) == "{{...}: {...}}" def test_cyclic_set(): x = set() x.add(HashItAnyway(x)) assert pretty.pretty(x) == "{{...}}" def test_pprint(): t = {"hi": 1} with capture_out() as o: pretty.pprint(t) assert o.getvalue().strip() == pretty.pretty(t) class BigList(list): def _repr_pretty_(self, printer, cycle): if cycle: return "[...]" else: with printer.group(open="[", close="]"): with printer.indent(5): for v in self: printer.pretty(v) printer.breakable(",") def test_print_with_indent(): pretty.pretty(BigList([1, 2, 3])) class MyException(Exception): pass def test_exception(): assert pretty.pretty(ValueError("hi")) == "ValueError('hi')" assert pretty.pretty(ValueError("hi", "there")) == "ValueError('hi', 'there')" assert "test_pretty." in pretty.pretty(MyException()) def test_re_evals(): for r in [ re.compile(r"hi"), re.compile(r"b\nc", re.MULTILINE), re.compile(br"hi", 0), re.compile(u"foo", re.MULTILINE | re.UNICODE), ]: r2 = eval(pretty.pretty(r), globals()) assert r.pattern == r2.pattern and r.flags == r2.flags class CustomStuff(object): def __init__(self): self.hi = 1 self.bye = "fish" self.spoon = self @property def oops(self): raise AttributeError("Nope") def squirrels(self): pass def test_custom(): assert "bye" not in pretty.pretty(CustomStuff()) assert "bye=" in pretty.pretty(CustomStuff(), verbose=True) assert "squirrels" not in pretty.pretty(CustomStuff(), verbose=True) def test_print_builtin_function(): assert pretty.pretty(abs) == "" def test_pretty_function(): assert "." 
in pretty.pretty(test_pretty_function) def test_empty_printer(): printer = pretty.RepresentationPrinter( pretty.CUnicodeIO(), singleton_pprinters={}, type_pprinters={int: pretty._repr_pprint, list: pretty._repr_pprint}, deferred_pprinters={}, ) printer.pretty([1, 2, 3]) assert printer.output.getvalue() == u"[1, 2, 3]" def test_breakable_at_group_boundary(): assert "\n" in pretty.pretty([[], "000000"], max_width=5) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_provisional_strategies.py000066400000000000000000000047431354103617500337440ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import re import string from binascii import unhexlify import pytest from hypothesis import given from hypothesis.errors import InvalidArgument from hypothesis.provisional import domains, ip4_addr_strings, ip6_addr_strings, urls @given(urls()) def test_is_URL(url): allowed_chars = set(string.ascii_letters + string.digits + "$-_.+!*'(),%/") url_schemeless = url.split("://", 1)[1] path = url_schemeless.split("/", 1)[1] if "/" in url_schemeless else "" assert all(c in allowed_chars for c in path) assert all( re.match("^[0-9A-Fa-f]{2}", after_perc) for after_perc in path.split("%")[1:] ) @given(ip4_addr_strings()) def test_is_IP4_addr(address): as_num = [int(n) for n in address.split(".")] assert len(as_num) == 4 assert all(0 <= n <= 255 for n in as_num) @given(ip6_addr_strings()) def test_is_IP6_addr(address): # Works for non-normalised addresses produced by this strategy, but not # a particularly general test assert address == address.upper() as_hex = address.split(":") assert len(as_hex) == 8 assert all(len(part) == 4 for part in as_hex) raw = unhexlify(address.replace(u":", u"").encode("ascii")) assert len(raw) == 16 @pytest.mark.parametrize("max_length", [-1, 0, 3, 4.0, 256]) @pytest.mark.parametrize("max_element_length", [-1, 0, 4.0, 64, 128]) def test_invalid_domain_arguments(max_length, max_element_length): with pytest.raises(InvalidArgument): domains(max_length=max_length, max_element_length=max_element_length).example() @pytest.mark.parametrize("max_length", [None, 4, 8, 255]) @pytest.mark.parametrize("max_element_length", [None, 1, 2, 4, 8, 63]) def test_valid_domains_arguments(max_length, max_element_length): domains(max_length=max_length, max_element_length=max_element_length).example() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_random_module.py000066400000000000000000000072141354103617500317660ustar00rootroot00000000000000# coding=utf-8 # # This 
file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import random import pytest import hypothesis.strategies as st from hypothesis import find, given, register_random, reporting from hypothesis.errors import InvalidArgument from hypothesis.internal import entropy from hypothesis.internal.entropy import deterministic_PRNG from tests.common.utils import capture_out, checks_deprecated_behaviour def test_can_seed_random(): with capture_out() as out: with reporting.with_reporter(reporting.default): with pytest.raises(AssertionError): @given(st.random_module()) def test(r): assert False test() assert "RandomSeeder(0)" in out.getvalue() @given(st.random_module(), st.random_module()) def test_seed_random_twice(r, r2): assert repr(r) == repr(r2) @given(st.random_module()) def test_does_not_fail_health_check_if_randomness_is_used(r): random.getrandbits(128) def test_cannot_register_non_Random(): with pytest.raises(InvalidArgument): register_random("not a Random instance") def test_registering_a_Random_is_idempotent(): r = random.Random() register_random(r) register_random(r) assert entropy.RANDOMS_TO_MANAGE.pop() is r assert r not in entropy.RANDOMS_TO_MANAGE def test_manages_registered_Random_instance(): r = random.Random() register_random(r) state = r.getstate() result = [] @given(st.integers()) def inner(x): v = r.random() if result: assert v == result[0] 
else: result.append(v) inner() assert state == r.getstate() entropy.RANDOMS_TO_MANAGE.remove(r) assert r not in entropy.RANDOMS_TO_MANAGE def test_registered_Random_is_seeded_by_random_module_strategy(): r = random.Random() register_random(r) state = r.getstate() results = set() count = [0] @given(st.integers()) def inner(x): results.add(r.random()) count[0] += 1 inner() assert count[0] > len(results) * 0.9, "too few unique random numbers" assert state == r.getstate() entropy.RANDOMS_TO_MANAGE.remove(r) assert r not in entropy.RANDOMS_TO_MANAGE @given(st.random_module()) def test_will_actually_use_the_random_seed(rnd): a = random.randint(0, 100) b = random.randint(0, 100) random.seed(rnd.seed) assert a == random.randint(0, 100) assert b == random.randint(0, 100) def test_given_does_not_pollute_state(): with deterministic_PRNG(): @given(st.random_module()) def test(r): pass test() state_a = random.getstate() test() state_b = random.getstate() assert state_a != state_b @checks_deprecated_behaviour def test_find_does_not_pollute_state(): with deterministic_PRNG(): find(st.random_module(), lambda r: True) state_a = random.getstate() find(st.random_module(), lambda r: True) state_b = random.getstate() assert state_a != state_b hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_recursive.py000066400000000000000000000033751354103617500311540ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import given from hypothesis.errors import InvalidArgument from tests.common.debug import minimal @given(st.recursive(st.booleans(), st.lists, max_leaves=10)) def test_respects_leaf_limit(xs): def flatten(x): if isinstance(x, list): return sum(map(flatten, x), []) else: return [x] assert len(flatten(xs)) <= 10 def test_can_find_nested(): x = minimal( st.recursive(st.booleans(), lambda x: st.tuples(x, x)), lambda x: isinstance(x, tuple) and isinstance(x[0], tuple), ) assert x == ((False, False), False) def test_recursive_call_validates_expand_returns_strategies(): with pytest.raises(InvalidArgument): st.recursive(st.booleans(), lambda x: 1).example() def test_recursive_call_validates_base_is_strategy(): x = st.recursive(1, lambda x: st.none()) with pytest.raises(InvalidArgument): x.example() @given(st.recursive(st.none(), lambda x: st.one_of(x, x))) def test_issue_1502_regression(s): pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_reflection.py000066400000000000000000000426271354103617500313020ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import sys from copy import deepcopy from functools import partial import pytest from hypothesis.internal.compat import PY2, PY3, FullArgSpec, getfullargspec from hypothesis.internal.reflection import ( arg_string, convert_keyword_arguments, convert_positional_arguments, define_function_signature, eval_directory, fully_qualified_name, function_digest, get_pretty_function_description, is_mock, proxies, required_args, source_exec_as_module, unbind_method, ) from tests.common.utils import raises try: from unittest.mock import MagicMock, Mock, NonCallableMagicMock, NonCallableMock except ImportError: from mock import MagicMock, Mock, NonCallableMagicMock, NonCallableMock def do_conversion_test(f, args, kwargs): result = f(*args, **kwargs) cargs, ckwargs = convert_keyword_arguments(f, args, kwargs) assert result == f(*cargs, **ckwargs) cargs2, ckwargs2 = convert_positional_arguments(f, args, kwargs) assert result == f(*cargs2, **ckwargs2) def test_simple_conversion(): def foo(a, b, c): return (a, b, c) assert convert_keyword_arguments(foo, (1, 2, 3), {}) == ((1, 2, 3), {}) assert convert_keyword_arguments(foo, (), {"a": 3, "b": 2, "c": 1}) == ( (3, 2, 1), {}, ) do_conversion_test(foo, (1, 0), {"c": 2}) do_conversion_test(foo, (1,), {"c": 2, "b": "foo"}) def test_populates_defaults(): def bar(x=[], y=1): pass assert convert_keyword_arguments(bar, (), {}) == (([], 1), {}) assert convert_keyword_arguments(bar, (), {"y": 42}) == (([], 42), {}) do_conversion_test(bar, (), {}) do_conversion_test(bar, (1,), {}) def test_leaves_unknown_kwargs_in_dict(): def bar(x, **kwargs): pass assert convert_keyword_arguments(bar, (1,), {"foo": "hi"}) == ((1,), {"foo": "hi"}) assert convert_keyword_arguments(bar, (), {"x": 1, "foo": "hi"}) == ( (1,), {"foo": "hi"}, ) do_conversion_test(bar, (1,), {}) do_conversion_test(bar, (), {"x": 1, "y": 1}) def test_errors_on_bad_kwargs(): def bar(): pass with 
raises(TypeError): convert_keyword_arguments(bar, (), {"foo": 1}) def test_passes_varargs_correctly(): def foo(*args): pass assert convert_keyword_arguments(foo, (1, 2, 3), {}) == ((1, 2, 3), {}) do_conversion_test(foo, (1, 2, 3), {}) def test_errors_if_keyword_precedes_positional(): def foo(x, y): pass with raises(TypeError): convert_keyword_arguments(foo, (1,), {"x": 2}) def test_errors_if_not_enough_args(): def foo(a, b, c, d=1): pass with raises(TypeError): convert_keyword_arguments(foo, (1, 2), {"d": 4}) def test_errors_on_extra_kwargs(): def foo(a): pass with raises(TypeError) as e: convert_keyword_arguments(foo, (1,), {"b": 1}) assert "keyword" in e.value.args[0] with raises(TypeError) as e2: convert_keyword_arguments(foo, (1,), {"b": 1, "c": 2}) assert "keyword" in e2.value.args[0] def test_positional_errors_if_too_many_args(): def foo(a): pass with raises(TypeError) as e: convert_positional_arguments(foo, (1, 2), {}) assert "2 given" in e.value.args[0] def test_positional_errors_if_too_few_args(): def foo(a, b, c): pass with raises(TypeError): convert_positional_arguments(foo, (1, 2), {}) def test_positional_does_not_error_if_extra_args_are_kwargs(): def foo(a, b, c): pass convert_positional_arguments(foo, (1, 2), {"c": 3}) def test_positional_errors_if_given_bad_kwargs(): def foo(a): pass with raises(TypeError) as e: convert_positional_arguments(foo, (), {"b": 1}) assert "unexpected keyword argument" in e.value.args[0] def test_positional_errors_if_given_duplicate_kwargs(): def foo(a): pass with raises(TypeError) as e: convert_positional_arguments(foo, (2,), {"a": 1}) assert "multiple values" in e.value.args[0] def test_names_of_functions_are_pretty(): assert ( get_pretty_function_description(test_names_of_functions_are_pretty) == "test_names_of_functions_are_pretty" ) class Foo(object): @classmethod def bar(cls): pass def baz(cls): pass def __repr__(self): return "SoNotFoo()" def test_class_names_are_not_included_in_class_method_prettiness(): assert 
get_pretty_function_description(Foo.bar) == "bar" def test_repr_is_included_in_bound_method_prettiness(): assert get_pretty_function_description(Foo().baz) == "SoNotFoo().baz" def test_class_is_not_included_in_unbound_method(): assert get_pretty_function_description(Foo.baz) == "baz" def test_does_not_error_on_confused_sources(): def ed(f, *args): return f x = ed( lambda x, y: (x * y).conjugate() == x.conjugate() * y.conjugate(), complex, complex, ) get_pretty_function_description(x) def test_digests_are_reasonably_unique(): assert function_digest(test_simple_conversion) != function_digest( test_does_not_error_on_confused_sources ) def test_digest_returns_the_same_value_for_two_calls(): assert function_digest(test_simple_conversion) == function_digest( test_simple_conversion ) def test_can_digest_a_built_in_function(): import math assert function_digest(math.isnan) != function_digest(range) def test_can_digest_a_unicode_lambda(): function_digest(lambda x: "☃" in str(x)) def test_can_digest_a_function_with_no_name(): def foo(x, y): pass function_digest(partial(foo, 1)) def test_arg_string_is_in_order(): def foo(c, a, b, f, a1): pass assert arg_string(foo, (1, 2, 3, 4, 5), {}) == "c=1, a=2, b=3, f=4, a1=5" assert ( arg_string(foo, (1, 2), {"b": 3, "f": 4, "a1": 5}) == "c=1, a=2, b=3, f=4, a1=5" ) def test_varkwargs_are_sorted_and_after_real_kwargs(): def foo(d, e, f, **kwargs): pass assert ( arg_string(foo, (), {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6}) == "d=4, e=5, f=6, a=1, b=2, c=3" ) def test_varargs_come_without_equals(): def foo(a, *args): pass assert arg_string(foo, (1, 2, 3, 4), {}) == "2, 3, 4, a=1" def test_can_mix_varargs_and_varkwargs(): def foo(*args, **kwargs): pass assert arg_string(foo, (1, 2, 3), {"c": 7}) == "1, 2, 3, c=7" def test_arg_string_does_not_include_unprovided_defaults(): def foo(a, b, c=9, d=10): pass assert arg_string(foo, (1,), {"b": 1, "d": 11}) == "a=1, b=1, d=11" class A(object): def f(self): pass def g(self): pass class 
B(A): pass class C(A): def f(self): pass def test_unbind_gives_parent_class_function(): assert unbind_method(B().f) == unbind_method(A.f) def test_unbind_distinguishes_different_functions(): assert unbind_method(A.f) != unbind_method(A.g) def test_unbind_distinguishes_overridden_functions(): assert unbind_method(C().f) != unbind_method(A.f) def universal_acceptor(*args, **kwargs): return args, kwargs def has_one_arg(hello): pass def has_two_args(hello, world): pass def has_a_default(x, y, z=1): pass def has_varargs(*args): pass def has_kwargs(**kwargs): pass @pytest.mark.parametrize("f", [has_one_arg, has_two_args, has_varargs, has_kwargs]) def test_copying_preserves_argspec(f): af = getfullargspec(f) t = define_function_signature("foo", "docstring", af)(universal_acceptor) at = getfullargspec(t) assert af.args == at.args assert af.varargs == at.varargs assert af.varkw == at.varkw assert len(af.defaults or ()) == len(at.defaults or ()) assert af.kwonlyargs == at.kwonlyargs assert af.kwonlydefaults == at.kwonlydefaults assert af.annotations == at.annotations def test_name_does_not_clash_with_function_names(): def f(): pass @define_function_signature("f", "A docstring for f", getfullargspec(f)) def g(): pass g() def test_copying_sets_name(): f = define_function_signature( "hello_world", "A docstring for hello_world", getfullargspec(has_two_args) )(universal_acceptor) assert f.__name__ == "hello_world" def test_copying_sets_docstring(): f = define_function_signature( "foo", "A docstring for foo", getfullargspec(has_two_args) )(universal_acceptor) assert f.__doc__ == "A docstring for foo" def test_uses_defaults(): f = define_function_signature( "foo", "A docstring for foo", getfullargspec(has_a_default) )(universal_acceptor) assert f(3, 2) == ((3, 2, 1), {}) def test_uses_varargs(): f = define_function_signature( "foo", "A docstring for foo", getfullargspec(has_varargs) )(universal_acceptor) assert f(1, 2) == ((1, 2), {}) DEFINE_FOO_FUNCTION = """ def foo(x): return x 
""" def test_exec_as_module_execs(): m = source_exec_as_module(DEFINE_FOO_FUNCTION) assert m.foo(1) == 1 def test_exec_as_module_caches(): assert source_exec_as_module(DEFINE_FOO_FUNCTION) is source_exec_as_module( DEFINE_FOO_FUNCTION ) def test_exec_leaves_sys_path_unchanged(): old_path = deepcopy(sys.path) source_exec_as_module("hello_world = 42") assert sys.path == old_path def test_define_function_signature_works_with_conflicts(): def accepts_everything(*args, **kwargs): pass define_function_signature( "hello", "A docstring for hello", FullArgSpec( args=("f",), varargs=None, varkw=None, defaults=None, kwonlyargs=[], kwonlydefaults=None, annotations={}, ), )(accepts_everything)(1) define_function_signature( "hello", "A docstring for hello", FullArgSpec( args=(), varargs="f", varkw=None, defaults=None, kwonlyargs=[], kwonlydefaults=None, annotations={}, ), )(accepts_everything)(1) define_function_signature( "hello", "A docstring for hello", FullArgSpec( args=(), varargs=None, varkw="f", defaults=None, kwonlyargs=[], kwonlydefaults=None, annotations={}, ), )(accepts_everything)() define_function_signature( "hello", "A docstring for hello", FullArgSpec( args=("f", "f_3"), varargs="f_1", varkw="f_2", defaults=None, kwonlyargs=[], kwonlydefaults=None, annotations={}, ), )(accepts_everything)(1, 2) def test_define_function_signature_validates_arguments(): with raises(ValueError): define_function_signature( "hello_world", None, FullArgSpec( args=["a b"], varargs=None, varkw=None, defaults=None, kwonlyargs=[], kwonlydefaults=None, annotations={}, ), ) def test_define_function_signature_validates_function_name(): with raises(ValueError): define_function_signature( "hello world", None, FullArgSpec( args=["a", "b"], varargs=None, varkw=None, defaults=None, kwonlyargs=[], kwonlydefaults=None, annotations={}, ), ) class Container(object): def funcy(self): pass def test_fully_qualified_name(): assert ( fully_qualified_name(test_copying_preserves_argspec) == 
"tests.cover.test_reflection.test_copying_preserves_argspec" ) assert ( fully_qualified_name(Container.funcy) == "tests.cover.test_reflection.Container.funcy" ) assert ( fully_qualified_name(fully_qualified_name) == "hypothesis.internal.reflection.fully_qualified_name" ) def test_qualname_of_function_with_none_module_is_name(): def f(): pass f.__module__ = None assert fully_qualified_name(f)[-1] == "f" def test_can_proxy_functions_with_mixed_args_and_varargs(): def foo(a, *args): return (a, args) @proxies(foo) def bar(*args, **kwargs): return foo(*args, **kwargs) assert bar(1, 2) == (1, (2,)) def test_can_delegate_to_a_function_with_no_positional_args(): def foo(a, b): return (a, b) @proxies(foo) def bar(**kwargs): return foo(**kwargs) assert bar(2, 1) == (2, 1) @pytest.mark.parametrize( "func,args,expected", [ (lambda: None, (), None), (lambda a: a ** 2, (2,), 4), (lambda *a: a, [1, 2, 3], (1, 2, 3)), ], ) def test_can_proxy_lambdas(func, args, expected): @proxies(func) def wrapped(*args, **kwargs): return func(*args, **kwargs) assert wrapped.__name__ == "" assert wrapped(*args) == expected class Snowman(object): def __repr__(self): return "☃" class BittySnowman(object): def __repr__(self): return "☃" def test_can_handle_unicode_repr(): def foo(x): pass assert arg_string(foo, [Snowman()], {}) == "x=☃" assert arg_string(foo, [], {"x": Snowman()}) == "x=☃" class NoRepr(object): pass def test_can_handle_repr_on_type(): def foo(x): pass assert arg_string(foo, [Snowman], {}) == "x=Snowman" assert arg_string(foo, [NoRepr], {}) == "x=NoRepr" def test_can_handle_repr_of_none(): def foo(x): pass assert arg_string(foo, [None], {}) == "x=None" assert arg_string(foo, [], {"x": None}) == "x=None" if not PY3: def test_can_handle_non_unicode_repr_containing_non_ascii(): def foo(x): pass assert arg_string(foo, [BittySnowman()], {}) == "x=☃" assert arg_string(foo, [], {"x": BittySnowman()}) == "x=☃" def test_does_not_put_eval_directory_on_path(): source_exec_as_module("hello = 
'world'") assert eval_directory() not in sys.path def test_kwargs_appear_in_arg_string(): def varargs(*args, **kwargs): pass assert "x=1" in arg_string(varargs, (), {"x": 1}) def test_is_mock_with_negative_cases(): assert not is_mock(None) assert not is_mock(1234) assert not is_mock(is_mock) assert not is_mock(BittySnowman()) assert not is_mock("foobar") assert not is_mock(Mock(spec=BittySnowman)) assert not is_mock(MagicMock(spec=BittySnowman)) def test_is_mock_with_positive_cases(): assert is_mock(Mock()) assert is_mock(MagicMock()) assert is_mock(NonCallableMock()) assert is_mock(NonCallableMagicMock()) class Target(object): def __init__(self, a, b): pass def method(self, a, b): pass @pytest.mark.parametrize("target", [Target, Target(1, 2).method]) @pytest.mark.parametrize( "args,kwargs,expected", [ ((), {}, set("ab")), ((1,), {}, set("b")), ((1, 2), {}, set()), ((), dict(a=1), set("b")), ((), dict(b=2), set("a")), ((), dict(a=1, b=2), set()), ], ) def test_required_args(target, args, kwargs, expected): # Mostly checking that `self` (and only self) is correctly excluded assert required_args(target, args, kwargs) == expected # fmt: off pi = "π"; is_str_pi = lambda x: x == pi # noqa: E731 # fmt: on def test_can_handle_unicode_identifier_in_same_line_as_lambda_def(): assert get_pretty_function_description(is_str_pi) == "lambda x: x == pi" @pytest.mark.skipif(PY2, reason="detect_encoding does not exist in Python 2") def test_can_render_lambda_with_no_encoding(): is_positive = lambda x: x > 0 # Monkey-patching out the `tokenize.detect_encoding` method here means # that our reflection can't detect the encoding of the source file, and # has to fall back to assuming it's ASCII. 
import tokenize old_detect_encoding = tokenize.detect_encoding try: del tokenize.detect_encoding assert get_pretty_function_description(is_positive) == "lambda x: x > 0" finally: tokenize.detect_encoding = old_detect_encoding @pytest.mark.skipif(PY2, reason="detect_encoding does not exist in Python 2") def test_does_not_crash_on_utf8_lambda_without_encoding(): # Monkey-patching out the `tokenize.detect_encoding` method here means # that our reflection can't detect the encoding of the source file, and # has to fall back to assuming it's ASCII. import tokenize old_detect_encoding = tokenize.detect_encoding try: del tokenize.detect_encoding assert get_pretty_function_description(is_str_pi) == "lambda x: " finally: tokenize.detect_encoding = old_detect_encoding hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_regex.py000066400000000000000000000317131354103617500302540ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import re import sys import unicodedata import pytest import hypothesis.strategies as st from hypothesis import assume, given, settings from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import PY3, hrange, hunichr from hypothesis.searchstrategy.regex import ( SPACE_CHARS, UNICODE_DIGIT_CATEGORIES, UNICODE_SPACE_CATEGORIES, UNICODE_SPACE_CHARS, UNICODE_WEIRD_NONWORD_CHARS, UNICODE_WORD_CATEGORIES, base_regex_strategy, ) from tests.common.debug import assert_all_examples, assert_no_examples, find_any def is_ascii(s): return all(ord(c) < 128 for c in s) def is_digit(s): return all(unicodedata.category(c) in UNICODE_DIGIT_CATEGORIES for c in s) def is_space(s): return all(c in SPACE_CHARS for c in s) def is_unicode_space(s): return all( unicodedata.category(c) in UNICODE_SPACE_CATEGORIES or c in UNICODE_SPACE_CHARS for c in s ) def is_word(s): return all( c == "_" or ( (PY3 or c not in UNICODE_WEIRD_NONWORD_CHARS) and unicodedata.category(c) in UNICODE_WORD_CATEGORIES ) for c in s ) def ascii_regex(pattern): flags = re.ASCII if PY3 else 0 return re.compile(pattern, flags) def unicode_regex(pattern): return re.compile(pattern, re.UNICODE) def _test_matching_pattern(pattern, isvalidchar, is_unicode=False): r = unicode_regex(pattern) if is_unicode else ascii_regex(pattern) codepoints = hrange(0, sys.maxunicode + 1) if is_unicode else hrange(1, 128) for c in [hunichr(x) for x in codepoints]: if isvalidchar(c): assert r.search(c), ( '"%s" supposed to match "%s" (%r, category "%s"), ' "but it doesn't" % (pattern, c, c, unicodedata.category(c)) ) else: assert not r.search(c), ( '"%s" supposed not to match "%s" (%r, category "%s"), ' "but it does" % (pattern, c, c, unicodedata.category(c)) ) @pytest.mark.parametrize( "category,predicate", [(r"\w", is_word), (r"\d", is_digit), (r"\s", None)] ) @pytest.mark.parametrize("invert", [False, True]) 
@pytest.mark.parametrize("is_unicode", [False, True]) def test_matching(category, predicate, invert, is_unicode): if predicate is None: # Special behaviour due to \x1c, INFORMATION SEPARATOR FOUR predicate = is_unicode_space if is_unicode else is_space if invert: category = category.swapcase() def pred(s): return not predicate(s) else: pred = predicate _test_matching_pattern(category, pred, is_unicode) @pytest.mark.parametrize( "pattern", [ u".", # anything u"a", u"abc", u"[a][b][c]", u"[^a][^b][^c]", # literals u"[a-z0-9_]", u"[^a-z0-9_]", # range and negative range u"ab?", u"ab*", u"ab+", # quantifiers u"ab{5}", u"ab{5,10}", u"ab{,10}", u"ab{5,}", # repeaters u"ab|cd|ef", # branch u"(foo)+", u"(['\"])[a-z]+\\1", u"(?:[a-z])(['\"])[a-z]+\\1", u"(?P['\"])[a-z]+(?P=foo)", # groups u"^abc", # beginning u"\\d", u"[\\d]", u"[^\\D]", u"\\w", u"[\\w]", u"[^\\W]", u"\\s", u"[\\s]", u"[^\\S]", # categories ], ) @pytest.mark.parametrize("encode", [False, True]) def test_can_generate(pattern, encode): if encode: pattern = pattern.encode("ascii") assert_all_examples(st.from_regex(pattern), re.compile(pattern).search) @pytest.mark.parametrize( "pattern", [ re.compile(u"\\Aa\\Z", re.IGNORECASE), u"(?i)\\Aa\\Z", re.compile(u"\\A[ab]\\Z", re.IGNORECASE), u"(?i)\\A[ab]\\Z", ], ) def test_literals_with_ignorecase(pattern): strategy = st.from_regex(pattern) find_any(strategy, lambda s: s == u"a") find_any(strategy, lambda s: s == u"A") @pytest.mark.parametrize( "pattern", [re.compile(u"\\A[^a][^b]\\Z", re.IGNORECASE), u"(?i)\\A[^a][^b]\\Z"] ) def test_not_literal_with_ignorecase(pattern): assert_all_examples( st.from_regex(pattern), lambda s: s[0] not in (u"a", u"A") and s[1] not in (u"b", u"B"), ) def test_any_doesnt_generate_newline(): assert_all_examples(st.from_regex(u"\\A.\\Z"), lambda s: s != u"\n") @pytest.mark.parametrize("pattern", [re.compile(u"\\A.\\Z", re.DOTALL), u"(?s)\\A.\\Z"]) def test_any_with_dotall_generate_newline(pattern): find_any( st.from_regex(pattern), 
lambda s: s == u"\n", settings(max_examples=10 ** 6) ) @pytest.mark.parametrize("pattern", [re.compile(b"\\A.\\Z", re.DOTALL), b"(?s)\\A.\\Z"]) def test_any_with_dotall_generate_newline_binary(pattern): find_any( st.from_regex(pattern), lambda s: s == b"\n", settings(max_examples=10 ** 6) ) @pytest.mark.parametrize( "pattern", [ u"\\d", u"[\\d]", u"[^\\D]", u"\\w", u"[\\w]", u"[^\\W]", u"\\s", u"[\\s]", u"[^\\S]", ], ) @pytest.mark.parametrize("is_unicode", [False, True]) @pytest.mark.parametrize("invert", [False, True]) def test_groups(pattern, is_unicode, invert): if u"d" in pattern.lower(): group_pred = is_digit elif u"w" in pattern.lower(): group_pred = is_word else: # Special behaviour due to \x1c, INFORMATION SEPARATOR FOUR group_pred = is_unicode_space if is_unicode else is_space if invert: pattern = pattern.swapcase() _p = group_pred def group_pred(s): # pylint:disable=function-redefined return not _p(s) pattern = u"^%s\\Z" % (pattern,) compiler = unicode_regex if is_unicode else ascii_regex strategy = st.from_regex(compiler(pattern)) find_any(strategy.filter(group_pred), is_ascii) if is_unicode: find_any(strategy, lambda s: group_pred(s) and not is_ascii(s)) assert_all_examples(strategy, group_pred) def test_caret_in_the_middle_does_not_generate_anything(): r = re.compile(u"a^b") assert_no_examples(st.from_regex(r)) def test_end_with_terminator_does_not_pad(): assert_all_examples(st.from_regex(u"abc\\Z"), lambda x: x[-3:] == u"abc") def test_end(): strategy = st.from_regex(u"\\Aabc$") find_any(strategy, lambda s: s == u"abc") find_any(strategy, lambda s: s == u"abc\n") def test_groupref_exists(): assert_all_examples( st.from_regex(u"^(<)?a(?(1)>)$"), lambda s: s in (u"a", u"a\n", u"", u"\n"), ) assert_all_examples( st.from_regex(u"^(a)?(?(1)b|c)$"), lambda s: s in (u"ab", u"ab\n", u"c", u"c\n") ) def test_impossible_negative_lookahead(): assert_no_examples(st.from_regex(u"(?!foo)foo")) @given(st.from_regex(u"(\\Afoo\\Z)")) def 
test_can_handle_boundaries_nested(s): assert s == u"foo" def test_groupref_not_shared_between_regex(): # If group references are (incorrectly!) shared between regex, this would # fail as the would only be one reference. st.tuples(st.from_regex("(a)\\1"), st.from_regex("(b)\\1")).example() @given(st.data()) def test_group_ref_is_not_shared_between_identical_regex(data): pattern = re.compile(u"^(.+)\\1\\Z", re.UNICODE) x = data.draw(base_regex_strategy(pattern)) y = data.draw(base_regex_strategy(pattern)) assume(x != y) assert pattern.match(x).end() == len(x) assert pattern.match(y).end() == len(y) @given(st.data()) def test_does_not_leak_groups(data): a = data.draw(base_regex_strategy(re.compile(u"^(a)\\Z"))) assert a == "a" b = data.draw(base_regex_strategy(re.compile(u"^(?(1)a|b)(.)\\Z"))) assert b[0] == "b" def test_positive_lookbehind(): find_any(st.from_regex(u".*(?<=ab)c"), lambda s: s.endswith(u"abc")) def test_positive_lookahead(): st.from_regex(u"a(?=bc).*").filter(lambda s: s.startswith(u"abc")).example() def test_negative_lookbehind(): # no efficient support strategy = st.from_regex(u"[abc]*(? 
1000: raise ValueError() with capture_out() as o: with pytest.raises(ValueError): test() assert "@reproduce_failure" not in o.getvalue() class Foo(object): def __repr__(self): return "not a valid python expression" def test_does_not_print_reproduction_if_told_not_to(): @settings(print_blob=False) @given(st.integers().map(lambda x: Foo())) def test(i): raise ValueError() with capture_out() as o: with pytest.raises(ValueError): test() assert "@reproduce_failure" not in o.getvalue() def test_raises_invalid_if_wrong_version(): b = b"hello world" n = len(b) @reproduce_failure("1.0.0", encode_failure(b)) @given(st.binary(min_size=n, max_size=n)) def test(x): pass with pytest.raises(InvalidArgument): test() def test_does_not_print_reproduction_if_verbosity_set_to_quiet(): @given(st.data()) @settings(verbosity=Verbosity.quiet) def test_always_fails(data): assert data.draw(st.just(False)) with capture_out() as out: with pytest.raises(AssertionError): test_always_fails() assert "@reproduce_failure" not in out.getvalue() @pytest.mark.parametrize( "ps,b", [ (PrintSettings.NEVER, False), (PrintSettings.INFER, True), (PrintSettings.ALWAYS, True), ], ) @checks_deprecated_behaviour def test_converts_print_settings_to_boolean(ps, b): assert settings(print_blob=ps).print_blob is b hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_reusable_values.py000066400000000000000000000056131354103617500323230ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import example, given, reject from hypothesis.errors import HypothesisDeprecationWarning, InvalidArgument base_reusable_strategies = ( st.text(), st.binary(), st.dates(), st.times(), st.timedeltas(), st.booleans(), st.complex_numbers(), st.floats(), st.floats(-1.0, 1.0), st.integers(), st.integers(1, 10), st.integers(1), ) @st.deferred def reusable(): return st.one_of( st.sampled_from(base_reusable_strategies), st.builds( st.floats, min_value=st.none() | st.floats(), max_value=st.none() | st.floats(), allow_infinity=st.booleans(), allow_nan=st.booleans(), ), st.builds(st.just, st.builds(list)), st.builds(st.sampled_from, st.lists(st.builds(list), min_size=1)), st.lists(reusable).map(st.one_of), st.lists(reusable).map(lambda ls: st.tuples(*ls)), ) assert not reusable.is_empty @example(st.integers(min_value=1)) @given(reusable) def test_reusable_strategies_are_all_reusable(s): try: s.validate() except (InvalidArgument, HypothesisDeprecationWarning): reject() assert s.has_reusable_values for s in base_reusable_strategies: test_reusable_strategies_are_all_reusable = example(s)( test_reusable_strategies_are_all_reusable ) test_reusable_strategies_are_all_reusable = example(st.tuples(s))( test_reusable_strategies_are_all_reusable ) def test_composing_breaks_reusability(): s = st.integers() assert s.has_reusable_values assert not s.filter(lambda x: True).has_reusable_values assert not s.map(lambda x: x).has_reusable_values assert not s.flatmap(lambda x: st.just(x)).has_reusable_values @pytest.mark.parametrize( "strat", [ st.lists(st.booleans()), st.sets(st.booleans()), st.dictionaries(st.booleans(), st.booleans()), ], ) def test_mutable_collections_do_not_have_reusable_values(strat): assert not strat.has_reusable_values 
def test_recursion_does_not_break_reusability(): x = st.deferred(lambda: st.none() | st.tuples(x)) assert x.has_reusable_values hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_runner_strategy.py000066400000000000000000000036041354103617500323730ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from unittest import TestCase import pytest from hypothesis import find, given, strategies as st from hypothesis.errors import InvalidArgument from hypothesis.stateful import RuleBasedStateMachine, rule from tests.common.utils import checks_deprecated_behaviour def test_cannot_use_without_a_runner(): @given(st.runner()) def f(x): pass with pytest.raises(InvalidArgument): f() @checks_deprecated_behaviour def test_cannot_use_in_find_without_default(): with pytest.raises(InvalidArgument): find(st.runner(), lambda x: True) @checks_deprecated_behaviour def test_is_default_in_find(): t = object() assert find(st.runner(t), lambda x: True) == t @given(st.runner(1)) def test_is_default_without_self(runner): assert runner == 1 class TestStuff(TestCase): @given(st.runner()) def test_runner_is_self(self, runner): assert runner is self @given(st.runner(default=3)) def test_runner_is_self_even_with_default(self, runner): assert runner is self class RunnerStateMachine(RuleBasedStateMachine): 
@rule(runner=st.runner()) def step(self, runner): assert runner is self TestState = RunnerStateMachine.TestCase hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_sampled_from.py000066400000000000000000000053671354103617500316200ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import collections import enum import hypothesis.strategies as st from hypothesis import given from hypothesis.errors import FailedHealthCheck, InvalidArgument, Unsatisfiable from hypothesis.internal.compat import hrange from hypothesis.strategies import sampled_from from tests.common.utils import checks_deprecated_behaviour, fails_with an_enum = enum.Enum("A", "a b c") an_ordereddict = collections.OrderedDict([("a", 1), ("b", 2), ("c", 3)]) @fails_with(InvalidArgument) def test_cannot_sample_sets(): sampled_from(set("abc")).example() def test_can_sample_sequence_without_warning(): sampled_from([1, 2, 3]).example() def test_can_sample_ordereddict_without_warning(): sampled_from(an_ordereddict).example() @given(sampled_from(an_enum)) def test_can_sample_enums(member): assert isinstance(member, an_enum) @checks_deprecated_behaviour def test_sampling_empty_is_deprecated(): assert sampled_from([]).is_empty @fails_with(FailedHealthCheck) @given(sampled_from(hrange(10)).filter(lambda x: x < 0)) def 
test_unsat_filtered_sampling(x): assert False @fails_with(Unsatisfiable) @given(sampled_from(hrange(2)).filter(lambda x: x < 0)) def test_unsat_filtered_sampling_in_rejection_stage(x): # Rejecting all possible indices before we calculate the allowed indices # takes an early exit path, so we need this test to cover that branch. assert False def test_easy_filtered_sampling(): x = sampled_from(hrange(100)).filter(lambda x: x == 0).example() assert x == 0 @given(sampled_from(hrange(100)).filter(lambda x: x == 99)) def test_filtered_sampling_finds_rare_value(x): assert x == 99 @given(st.sets(st.sampled_from(range(50)), min_size=50)) def test_efficient_sets_of_samples(x): assert x == set(range(50)) @given(st.lists(st.sampled_from([0] * 100), unique=True)) def test_does_not_include_duplicates_even_when_duplicated_in_collection(ls): assert len(ls) <= 1 @given(st.lists(st.sampled_from(hrange(100)), max_size=3, unique=True)) def test_max_size_is_respected_with_unique_sampled_from(ls): assert len(ls) <= 3 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_searchstrategy.py000066400000000000000000000050431354103617500321670ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import functools from collections import namedtuple import pytest from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import text_type from hypothesis.searchstrategy.strategies import one_of_strategies from hypothesis.strategies import booleans, integers, just, randoms, tuples from hypothesis.types import RandomWithSeed from tests.common.debug import assert_no_examples def test_or_errors_when_given_non_strategy(): bools = tuples(booleans()) with pytest.raises(ValueError): bools | u"foo" def test_joining_zero_strategies_fails(): with pytest.raises(ValueError): one_of_strategies(()) SomeNamedTuple = namedtuple(u"SomeNamedTuple", (u"a", u"b")) def last(xs): t = None for x in xs: t = x return t def test_random_repr_has_seed(): rnd = randoms().example() seed = rnd.seed assert text_type(seed) in repr(rnd) def test_random_only_produces_special_random(): st = randoms() assert isinstance(st.example(), RandomWithSeed) def test_just_strategy_uses_repr(): class WeirdRepr(object): def __repr__(self): return u"ABCDEFG" assert repr(just(WeirdRepr())) == u"just(%r)" % (WeirdRepr(),) def test_can_map(): s = integers().map(pack=lambda t: u"foo") assert s.example() == u"foo" def test_example_raises_unsatisfiable_when_too_filtered(): assert_no_examples(integers().filter(lambda x: False)) def nameless_const(x): def f(u, v): return u return functools.partial(f, x) def test_can_map_nameless(): f = nameless_const(2) assert repr(f) in repr(integers().map(f)) def test_can_flatmap_nameless(): f = nameless_const(just(3)) assert repr(f) in repr(integers().flatmap(f)) def test_flatmap_with_invalid_expand(): with pytest.raises(InvalidArgument): just(100).flatmap(lambda n: "a").example() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_seed_printing.py000066400000000000000000000066771354103617500320070ustar00rootroot00000000000000# coding=utf-8 # # This file is part of 
Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import time import pytest import hypothesis.core as core import hypothesis.strategies as st from hypothesis import Verbosity, assume, given, settings from hypothesis.database import InMemoryExampleDatabase from hypothesis.errors import FailedHealthCheck from hypothesis.internal.compat import hrange from tests.common.utils import all_values, capture_out @pytest.mark.parametrize("in_pytest", [False, True]) @pytest.mark.parametrize("fail_healthcheck", [False, True]) @pytest.mark.parametrize("verbosity", [Verbosity.normal, Verbosity.quiet]) def test_prints_seed_only_on_healthcheck( monkeypatch, in_pytest, fail_healthcheck, verbosity ): monkeypatch.setattr(core, "running_under_pytest", in_pytest) strategy = st.integers() if fail_healthcheck: def slow_map(i): time.sleep(10) return i strategy = strategy.map(slow_map) expected_exc = FailedHealthCheck else: expected_exc = AssertionError @settings(database=None, verbosity=verbosity) @given(strategy) def test(i): assert fail_healthcheck with capture_out() as o: with pytest.raises(expected_exc): test() output = o.getvalue() seed = test._hypothesis_internal_use_generated_seed assert seed is not None if fail_healthcheck and verbosity != Verbosity.quiet: assert "@seed(%d)" % (seed,) in output contains_pytest_instruction = ("--hypothesis-seed=%d" % (seed,)) in output assert 
contains_pytest_instruction == in_pytest else: assert "@seed" not in output assert "--hypothesis-seed=%d" % (seed,) not in output def test_uses_global_force(monkeypatch): monkeypatch.setattr(core, "global_force_seed", 42) @given(st.integers()) def test(i): raise ValueError() output = [] for _ in hrange(2): with capture_out() as o: with pytest.raises(ValueError): test() output.append(o.getvalue()) assert output[0] == output[1] assert "@seed" not in output[0] def test_does_print_on_reuse_from_database(): passes_healthcheck = False database = InMemoryExampleDatabase() @settings(database=database) @given(st.integers()) def test(i): assume(passes_healthcheck) raise ValueError() with capture_out() as o: with pytest.raises(FailedHealthCheck): test() assert "@seed" in o.getvalue() passes_healthcheck = True with capture_out() as o: with pytest.raises(ValueError): test() assert all_values(database) assert "@seed" not in o.getvalue() passes_healthcheck = False with capture_out() as o: with pytest.raises(FailedHealthCheck): test() assert "@seed" in o.getvalue() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_settings.py000066400000000000000000000316101354103617500307760ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import datetime import subprocess import sys import pytest import hypothesis.strategies as st from hypothesis import example, given, unlimited from hypothesis._settings import ( Verbosity, default_variable, local_settings, note_deprecation, settings, ) from hypothesis.database import ExampleDatabase from hypothesis.errors import InvalidArgument, InvalidState from hypothesis.stateful import GenericStateMachine, RuleBasedStateMachine, rule from hypothesis.utils.conventions import not_set from tests.common.utils import checks_deprecated_behaviour, fails_with def test_has_docstrings(): assert settings.verbosity.__doc__ original_default = settings.get_profile("default").max_examples def setup_function(fn): settings.load_profile("default") settings.register_profile("test_settings", settings()) settings.load_profile("test_settings") def test_cannot_set_non_settings(): s = settings() with pytest.raises(AttributeError): s.databas_file = u"some_file" def test_settings_uses_defaults(): s = settings() assert s.max_examples == settings.default.max_examples def test_raises_attribute_error(): with pytest.raises(AttributeError): settings().kittens def test_respects_none_database(): assert settings(database=None).database is None def test_can_repeatedly_push_the_same_thing(): s = settings(max_examples=12) t = settings(max_examples=17) assert settings().max_examples == original_default with local_settings(s): assert settings().max_examples == 12 with local_settings(t): assert settings().max_examples == 17 with local_settings(s): assert settings().max_examples == 12 with local_settings(t): assert settings().max_examples == 17 assert settings().max_examples == 12 assert settings().max_examples == 17 assert settings().max_examples == 12 assert settings().max_examples == original_default def test_cannot_create_settings_with_invalid_options(): with pytest.raises(InvalidArgument): 
settings(a_setting_with_limited_options=u"spoon") def test_cannot_register_with_parent_and_settings_args(): with pytest.raises(InvalidArgument): settings.register_profile( "conflicted", settings.default, settings=settings.default ) assert "conflicted" not in settings._profiles def test_can_set_verbosity(): settings(verbosity=Verbosity.quiet) settings(verbosity=Verbosity.normal) settings(verbosity=Verbosity.verbose) settings(verbosity=Verbosity.debug) def test_can_not_set_verbosity_to_non_verbosity(): with pytest.raises(InvalidArgument): settings(verbosity="kittens") @pytest.mark.parametrize("db", [None, ExampleDatabase()]) def test_inherits_an_empty_database(db): assert settings.default.database is not None s = settings(database=db) assert s.database is db with local_settings(s): t = settings() assert t.database is db @pytest.mark.parametrize("db", [None, ExampleDatabase()]) def test_can_assign_database(db): x = settings(database=db) assert x.database is db def test_will_reload_profile_when_default_is_absent(): original = settings.default default_variable.value = None assert settings.default is original def test_load_profile(): settings.load_profile("default") assert settings.default.max_examples == original_default assert settings.default.stateful_step_count == 50 settings.register_profile("test", settings(max_examples=10), stateful_step_count=5) settings.load_profile("test") assert settings.default.max_examples == 10 assert settings.default.stateful_step_count == 5 settings.load_profile("default") assert settings.default.max_examples == original_default assert settings.default.stateful_step_count == 50 def test_profile_names_must_be_strings(): with pytest.raises(InvalidArgument): settings.register_profile(5) with pytest.raises(InvalidArgument): settings.get_profile(5) with pytest.raises(InvalidArgument): settings.load_profile(5) def test_loading_profile_keeps_expected_behaviour(): settings.register_profile("ci", settings(max_examples=10000)) 
settings.load_profile("ci") assert settings().max_examples == 10000 with local_settings(settings(max_examples=5)): assert settings().max_examples == 5 assert settings().max_examples == 10000 def test_load_non_existent_profile(): with pytest.raises(InvalidArgument): settings.get_profile("nonsense") def test_cannot_delete_a_setting(): x = settings() with pytest.raises(AttributeError): del x.max_examples x.max_examples x = settings() with pytest.raises(AttributeError): del x.foo @checks_deprecated_behaviour def test_setting_to_unlimited_is_not_error_yet(): settings(timeout=unlimited) def test_cannot_set_settings(): x = settings() with pytest.raises(AttributeError): x.max_examples = "foo" with pytest.raises(AttributeError): x.database = "foo" assert x.max_examples != "foo" assert x.database != "foo" def test_can_have_none_database(): assert settings(database=None).database is None @pytest.mark.parametrize("db", [None, ExampleDatabase(":memory:")]) def test_database_type_must_be_ExampleDatabase(db): with local_settings(settings(database=db)): settings_property_db = settings.database with pytest.raises(InvalidArgument): settings(database=".hypothesis/examples") assert settings.database is settings_property_db def test_cannot_define_settings_once_locked(): with pytest.raises(InvalidState): settings._define_setting("hi", "there", 4) def test_cannot_assign_default(): with pytest.raises(AttributeError): settings.default = settings(max_examples=3) assert settings().max_examples != 3 def test_does_not_warn_if_quiet(): with pytest.warns(None) as rec: note_deprecation("This is bad", since="RELEASEDAY", verbosity=Verbosity.quiet) assert len(rec) == 0 @settings(max_examples=7) @given(st.builds(lambda: settings.default)) def test_settings_in_strategies_are_from_test_scope(s): assert s.max_examples == 7 def test_settings_alone(): @settings() def test_nothing(): pass with pytest.raises(InvalidArgument): test_nothing() @fails_with(InvalidArgument) def 
test_settings_applied_twice_is_error(): @given(st.integers()) @settings() @settings() def test_nothing(x): pass @settings() @given(st.integers()) def test_outer_ok(x): pass @given(st.integers()) @settings() def test_inner_ok(x): pass def test_settings_as_decorator_must_be_on_callable(): with pytest.raises(InvalidArgument): settings()(1) ASSERT_DATABASE_PATH = """ import tempfile from hypothesis import settings from hypothesis.configuration import set_hypothesis_home_dir from hypothesis.database import DirectoryBasedExampleDatabase settings.default.database if __name__ == '__main__': new_home = tempfile.mkdtemp() set_hypothesis_home_dir(new_home) db = settings.default.database assert isinstance(db, DirectoryBasedExampleDatabase), db assert db.path.startswith(new_home), (db.path, new_home) """ def test_puts_the_database_in_the_home_dir_by_default(tmpdir): script = tmpdir.join("assertlocation.py") script.write(ASSERT_DATABASE_PATH) subprocess.check_call([sys.executable, str(script)]) def test_database_is_reference_preserved(): s = settings(database=not_set) assert s.database is s.database @settings(verbosity=Verbosity.verbose) @example(x=99) @given(st.integers()) def test_settings_apply_for_explicit_examples(x): # Regression test for #1521 assert settings.default.verbosity == Verbosity.verbose def test_setattr_on_settings_singleton_is_error(): # https://github.com/pandas-dev/pandas/pull/22679#issuecomment-420750921 # Should be setting attributes on settings.default, not settings! 
with pytest.raises(AttributeError): settings.max_examples = 10 def test_deadline_given_none(): x = settings(deadline=None).deadline assert x is None def test_deadline_given_valid_int(): x = settings(deadline=1000).deadline assert isinstance(x, datetime.timedelta) assert x.days == 0 and x.seconds == 1 and x.microseconds == 0 def test_deadline_given_valid_float(): x = settings(deadline=2050.25).deadline assert isinstance(x, datetime.timedelta) assert x.days == 0 and x.seconds == 2 and x.microseconds == 50250 def test_deadline_given_valid_timedelta(): x = settings(deadline=datetime.timedelta(days=1, microseconds=15030000)).deadline assert isinstance(x, datetime.timedelta) assert x.days == 1 and x.seconds == 15 and x.microseconds == 30000 @pytest.mark.parametrize( "x", [ 0, -0.7, -1, 86400000000000000.2, datetime.timedelta(microseconds=-1), datetime.timedelta(0), ], ) def test_invalid_deadline(x): with pytest.raises(InvalidArgument): settings(deadline=x) @pytest.mark.parametrize("value", ["always"]) def test_can_not_set_print_blob_to_non_print_settings(value): with pytest.raises(InvalidArgument): settings(print_blob=value) settings_step_count = 1 @settings(stateful_step_count=settings_step_count) class StepCounter(RuleBasedStateMachine): def __init__(self): super(StepCounter, self).__init__() self.step_count = 0 @rule() def count_step(self): self.step_count += 1 def teardown(self): assert self.step_count <= settings_step_count test_settings_decorator_applies_to_rule_based_state_machine_class = StepCounter.TestCase def test_two_settings_decorators_applied_to_state_machine_class_raises_error(): with pytest.raises(InvalidArgument): @settings() @settings() class StatefulTest(RuleBasedStateMachine): pass def test_settings_decorator_applied_to_non_state_machine_class_raises_error(): with pytest.raises(InvalidArgument): @settings() class NonStateMachine: pass def test_assigning_to_settings_attribute_on_state_machine_raises_error(): with pytest.raises(AttributeError): class 
StateMachine(GenericStateMachine): pass StateMachine.settings = settings() state_machine_instance = StateMachine() state_machine_instance.settings = "any value" def test_can_not_set_timeout_to_time(): with pytest.raises(InvalidArgument): settings(timeout=60) def test_derandomise_with_explicit_database_is_invalid(): with pytest.raises(InvalidArgument): settings(derandomize=True, database=ExampleDatabase(":memory:")) @pytest.mark.parametrize( "kwargs", [ dict(max_examples=-1), dict(buffer_size=-1), dict(stateful_step_count=-1), dict(deadline=-1), dict(deadline=0), ], ) def test_invalid_settings_are_errors(kwargs): with pytest.raises(InvalidArgument): settings(**kwargs) @checks_deprecated_behaviour def test_boolean_deadlines(): settings(deadline=True) with pytest.raises(InvalidArgument): settings(deadline=False) @checks_deprecated_behaviour def test_non_boolean_derandomize(): assert settings(derandomize=1).derandomize is True assert settings(derandomize=0).derandomize is False @pytest.mark.parametrize("name", ["max_examples", "buffer_size", "stateful_step_count"]) @checks_deprecated_behaviour def test_dubious_settings_deprecations(name): settings(**{name: 2.5}) with pytest.raises(InvalidArgument): settings(**{name: "2.5"}) # deprecation warning, then type cast error. @checks_deprecated_behaviour def test_buffer_size_deprecated(): assert settings(buffer_size=100).buffer_size == 100 @checks_deprecated_behaviour def test_max_example_eq_0_warns_and_disables_generation(): # Terrible way to disable generation, but did predate the phases setting # and existed in our test suite so it's not an error *just* yet. 
@example(None) @given(st.integers()) @settings(max_examples=0) def inner(x): calls[0] += 1 assert x is None calls = [0] inner() assert calls[0] == 1 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_setup_teardown.py000066400000000000000000000062061354103617500322040ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import assume, given from hypothesis.strategies import integers, text class HasSetup(object): def setup_example(self): self.setups = getattr(self, u"setups", 0) self.setups += 1 class HasTeardown(object): def teardown_example(self, ex): self.teardowns = getattr(self, u"teardowns", 0) self.teardowns += 1 class SomeGivens(object): @given(integers()) def give_me_an_int(self, x): pass @given(text()) def give_me_a_string(myself, x): pass @given(integers()) def give_me_a_positive_int(self, x): assert x >= 0 @given(integers().map(lambda x: x.nope)) def fail_in_reify(self, x): pass @given(integers()) def assume_some_stuff(self, x): assume(x > 0) @given(integers().filter(lambda x: x > 0)) def assume_in_reify(self, x): pass class HasSetupAndTeardown(HasSetup, HasTeardown, SomeGivens): pass def test_calls_setup_and_teardown_on_self_as_first_argument(): x = HasSetupAndTeardown() x.give_me_an_int() x.give_me_a_string() assert x.setups > 0 assert 
x.teardowns == x.setups def test_calls_setup_and_teardown_on_self_unbound(): x = HasSetupAndTeardown() HasSetupAndTeardown.give_me_an_int(x) assert x.setups > 0 assert x.teardowns == x.setups def test_calls_setup_and_teardown_on_failure(): x = HasSetupAndTeardown() with pytest.raises(AssertionError): x.give_me_a_positive_int() assert x.setups > 0 assert x.teardowns == x.setups def test_still_tears_down_on_error_in_generation(): x = HasSetupAndTeardown() with pytest.raises(AttributeError): x.fail_in_reify() assert x.setups > 0 assert x.teardowns == x.setups def test_still_tears_down_on_failed_assume(): x = HasSetupAndTeardown() x.assume_some_stuff() assert x.setups > 0 assert x.teardowns == x.setups def test_still_tears_down_on_failed_assume_in_reify(): x = HasSetupAndTeardown() x.assume_in_reify() assert x.setups > 0 assert x.teardowns == x.setups def test_sets_up_without_teardown(): class Foo(HasSetup, SomeGivens): pass x = Foo() x.give_me_an_int() assert x.setups > 0 assert not hasattr(x, u"teardowns") def test_tears_down_without_setup(): class Foo(HasTeardown, SomeGivens): pass x = Foo() x.give_me_an_int() assert x.teardowns > 0 assert not hasattr(x, u"setups") hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_shrink_budgeting.py000066400000000000000000000027341354103617500324710ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import math import sys from random import Random import pytest from hypothesis.internal.compat import ceil from hypothesis.internal.conjecture.shrinking import Integer, Lexical, Ordering def measure_baseline(cls, value, **kwargs): shrinker = cls(value, lambda x: x == value, random=Random(0), **kwargs) shrinker.run() return shrinker.calls @pytest.mark.parametrize("cls", [Lexical, Ordering]) @pytest.mark.parametrize("example", [[255] * 8]) def test_meets_budgetary_requirements(cls, example): # Somewhat arbitrary but not unreasonable budget. n = len(example) budget = n * ceil(math.log(n, 2)) + 5 assert measure_baseline(cls, example) <= budget def test_integer_shrinking_is_parsimonious(): assert measure_baseline(Integer, int(sys.float_info.max)) <= 10 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_simple_characters.py000066400000000000000000000115541354103617500326330ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import unicodedata import pytest from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import text_type from hypothesis.strategies import characters from tests.common.debug import assert_no_examples, find_any, minimal from tests.common.utils import fails_with @fails_with(InvalidArgument) def test_nonexistent_category_argument(): characters(blacklist_categories=["foo"]).example() def test_bad_codepoint_arguments(): with pytest.raises(InvalidArgument): characters(min_codepoint=42, max_codepoint=24).example() def test_exclude_all_available_range(): with pytest.raises(InvalidArgument): characters( min_codepoint=ord("0"), max_codepoint=ord("0"), blacklist_characters="0" ).example() def test_when_nothing_could_be_produced(): with pytest.raises(InvalidArgument): characters( whitelist_categories=["Cc"], min_codepoint=ord("0"), max_codepoint=ord("9") ).example() def test_characters_of_specific_groups(): st = characters(whitelist_categories=("Lu", "Nd")) find_any(st, lambda c: unicodedata.category(c) == "Lu") find_any(st, lambda c: unicodedata.category(c) == "Nd") assert_no_examples(st, lambda c: unicodedata.category(c) not in ("Lu", "Nd")) def test_characters_of_major_categories(): st = characters(whitelist_categories=("L", "N")) find_any(st, lambda c: unicodedata.category(c).startswith("L")) find_any(st, lambda c: unicodedata.category(c).startswith("N")) assert_no_examples(st, lambda c: unicodedata.category(c)[0] not in ("L", "N")) def test_exclude_characters_of_specific_groups(): st = characters(blacklist_categories=("Lu", "Nd")) find_any(st, lambda c: unicodedata.category(c) != "Lu") find_any(st, lambda c: unicodedata.category(c) != "Nd") assert_no_examples(st, lambda c: unicodedata.category(c) in ("Lu", "Nd")) def test_exclude_characters_of_major_categories(): st = characters(blacklist_categories=("L", "N")) find_any(st, lambda c: not 
unicodedata.category(c).startswith("L")) find_any(st, lambda c: not unicodedata.category(c).startswith("N")) assert_no_examples(st, lambda c: unicodedata.category(c)[0] in ("L", "N")) def test_find_one(): char = minimal(characters(min_codepoint=48, max_codepoint=48), lambda _: True) assert char == u"0" def test_find_something_rare(): st = characters(whitelist_categories=["Zs"], min_codepoint=12288) find_any(st, lambda c: unicodedata.category(c) == "Zs") assert_no_examples(st, lambda c: unicodedata.category(c) != "Zs") def test_whitelisted_characters_alone(): with pytest.raises(InvalidArgument): characters(whitelist_characters=u"te02тест49st").example() def test_whitelisted_characters_overlap_blacklisted_characters(): good_chars = u"te02тест49st" bad_chars = u"ts94тсет" with pytest.raises(InvalidArgument) as exc: characters( min_codepoint=ord("0"), max_codepoint=ord("9"), whitelist_characters=good_chars, blacklist_characters=bad_chars, ).example() assert repr(good_chars) in text_type(exc) assert repr(bad_chars) in text_type(exc) def test_whitelisted_characters_override(): good_characters = u"teтестst" st = characters( min_codepoint=ord("0"), max_codepoint=ord("9"), whitelist_characters=good_characters, ) find_any(st, lambda c: c in good_characters) find_any(st, lambda c: c in "0123456789") assert_no_examples(st, lambda c: c not in good_characters + "0123456789") def test_blacklisted_characters(): bad_chars = u"te02тест49st" st = characters( min_codepoint=ord("0"), max_codepoint=ord("9"), blacklist_characters=bad_chars ) assert "1" == minimal(st, lambda c: True) assert_no_examples(st, lambda c: c in bad_chars) def test_whitelist_characters_disjoint_blacklist_characters(): good_chars = u"123abc" bad_chars = u"456def" st = characters( min_codepoint=ord("0"), max_codepoint=ord("9"), blacklist_characters=bad_chars, whitelist_characters=good_chars, ) assert_no_examples(st, lambda c: c in bad_chars) 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_simple_collections.py000066400000000000000000000122711354103617500330270ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from random import Random import pytest from hypothesis import given, settings from hypothesis.internal.compat import OrderedDict from hypothesis.strategies import ( booleans, dictionaries, fixed_dictionaries, frozensets, integers, lists, none, nothing, sets, text, tuples, ) from tests.common.debug import find_any, minimal from tests.common.utils import flaky @pytest.mark.parametrize( (u"col", u"strat"), [ ((), tuples()), ([], lists(none(), max_size=0)), (set(), sets(none(), max_size=0)), (frozenset(), frozensets(none(), max_size=0)), ({}, fixed_dictionaries({})), ([], lists(nothing())), ([], lists(nothing(), unique=True)), ], ) def test_find_empty_collection_gives_empty(col, strat): assert minimal(strat, lambda x: True) == col @pytest.mark.parametrize( (u"coltype", u"strat"), [(list, lists), (set, sets), (frozenset, frozensets)] ) def test_find_non_empty_collection_gives_single_zero(coltype, strat): assert minimal(strat(integers()), bool) == coltype((0,)) @pytest.mark.parametrize( (u"coltype", u"strat"), [(list, lists), (set, sets), (frozenset, frozensets)] ) def test_minimizes_to_empty(coltype, strat): 
assert minimal(strat(integers()), lambda x: True) == coltype() def test_minimizes_list_of_lists(): xs = minimal(lists(lists(booleans())), lambda x: any(x) and not all(x)) xs.sort() assert xs == [[], [False]] @given(sets(integers(0, 100), min_size=2, max_size=10)) @settings(max_examples=100) def test_sets_are_size_bounded(xs): assert 2 <= len(xs) <= 10 def test_ordered_dictionaries_preserve_keys(): r = Random() keys = list(range(100)) r.shuffle(keys) x = fixed_dictionaries(OrderedDict([(k, booleans()) for k in keys])).example() assert list(x.keys()) == keys @pytest.mark.parametrize(u"n", range(10)) def test_lists_of_fixed_length(n): assert minimal(lists(integers(), min_size=n, max_size=n), lambda x: True) == [0] * n @pytest.mark.parametrize(u"n", range(10)) def test_sets_of_fixed_length(n): x = minimal(sets(integers(), min_size=n, max_size=n), lambda x: True) assert len(x) == n if not n: assert x == set() else: assert x == set(range(min(x), min(x) + n)) @pytest.mark.parametrize(u"n", range(10)) def test_dictionaries_of_fixed_length(n): x = set( minimal( dictionaries(integers(), booleans(), min_size=n, max_size=n), lambda x: True ).keys() ) if not n: assert x == set() else: assert x == set(range(min(x), min(x) + n)) @pytest.mark.parametrize(u"n", range(10)) def test_lists_of_lower_bounded_length(n): x = minimal(lists(integers(), min_size=n), lambda x: sum(x) >= 2 * n) assert n <= len(x) <= 2 * n assert all(t >= 0 for t in x) assert len(x) == n or all(t > 0 for t in x) assert sum(x) == 2 * n @flaky(min_passes=1, max_runs=2) def test_can_find_unique_lists_of_non_set_order(): # This test checks that our strategy for unique lists doesn't accidentally # depend on the iteration order of sets. # # Unfortunately, that means that *this* test has to rely on set iteration # order. That makes it tricky to debug on CPython, because set iteration # order changes every time the process is launched. 
# # To get around this, define the PYTHONHASHSEED environment variable to # a consistent value. This could be 0, or it could be the PYTHONHASHSEED # value listed in a failure log from CI. ls = minimal(lists(text(), unique=True), lambda x: list(set(reversed(x))) != x) assert len(set(ls)) == len(ls) assert len(ls) == 2 def test_can_draw_empty_list_from_unsatisfiable_strategy(): assert find_any(lists(integers().filter(lambda s: False))) == [] def test_can_draw_empty_set_from_unsatisfiable_strategy(): assert find_any(sets(integers().filter(lambda s: False))) == set() small_set = sets(none()) @given(lists(small_set, min_size=10)) def test_small_sized_sets(x): pass def test_minimize_dicts_with_incompatible_keys(): assert minimal( fixed_dictionaries({1: booleans(), u"hi": lists(booleans())}), lambda x: True ) == {1: False, u"hi": []} @given( lists( tuples(integers(), integers()), min_size=2, unique_by=(lambda x: x[0], lambda x: x[1]), ) ) def test_lists_unique_by_tuple_funcs(ls): firstitems, seconditems = zip(*ls) assert len(set(firstitems)) == len(firstitems) assert len(set(seconditems)) == len(seconditems) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_simple_strings.py000066400000000000000000000066361354103617500322120ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given from hypothesis.strategies import binary, characters, text, tuples from tests.common.debug import minimal from tests.common.utils import checks_deprecated_behaviour, fails_with def test_can_minimize_up_to_zero(): s = minimal(text(), lambda x: any(lambda t: t <= u"0" for t in x)) assert s == u"0" def test_minimizes_towards_ascii_zero(): s = minimal(text(), lambda x: any(t < u"0" for t in x)) assert s == chr(ord(u"0") - 1) def test_can_handle_large_codepoints(): s = minimal(text(), lambda x: x >= u"☃") assert s == u"☃" def test_can_find_mixed_ascii_and_non_ascii_strings(): s = minimal( text(), lambda x: (any(t >= u"☃" for t in x) and any(ord(t) <= 127 for t in x)) ) assert len(s) == 2 assert sorted(s) == [u"0", u"☃"] def test_will_find_ascii_examples_given_the_chance(): s = minimal( tuples(text(max_size=1), text(max_size=1)), lambda x: x[0] and (x[0] < x[1]) ) assert ord(s[1]) == ord(s[0]) + 1 assert u"0" in s def test_minimisation_consistent_with_characters(): s = minimal(text("FEDCBA", min_size=3)) assert s == "AAA" def test_finds_single_element_strings(): assert minimal(text(), bool) == u"0" @fails_with(AssertionError) @given(binary()) def test_binary_generates_large_examples(x): assert len(x) <= 20 @given(binary(max_size=5)) def test_binary_respects_max_size(x): assert len(x) <= 5 def test_does_not_simplify_into_surrogates(): f = minimal(text(), lambda x: x >= u"\udfff") assert f == u"\ue000" size = 5 f = minimal(text(min_size=size), lambda x: sum(t >= u"\udfff" for t in x) >= size) assert f == u"\ue000" * size @given(text(alphabet=[u"a", u"b"])) def test_respects_alphabet_if_list(xs): assert set(xs).issubset(set(u"ab")) @given(text(alphabet=u"cdef")) def test_respects_alphabet_if_string(xs): assert set(xs).issubset(set(u"cdef")) @given(text()) def test_can_encode_as_utf8(s): s.encode("utf-8") @given(text(characters(blacklist_characters=u"\n"))) def 
test_can_blacklist_newlines(s): assert u"\n" not in s @given(text(characters(blacklist_categories=("Cc", "Cs")))) def test_can_exclude_newlines_by_category(s): assert u"\n" not in s @given(text(characters(max_codepoint=127))) def test_can_restrict_to_ascii_only(s): s.encode("ascii") def test_fixed_size_bytes_just_draw_bytes(): from hypothesis.internal.conjecture.data import ConjectureData x = ConjectureData.for_buffer(b"foo") assert x.draw(binary(min_size=3, max_size=3)) == b"foo" @given(text(max_size=10 ** 6)) def test_can_set_max_size_large(s): pass @checks_deprecated_behaviour def test_explicit_alphabet_None_is_deprecated(): text(alphabet=None).example() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_slices.py000066400000000000000000000061671354103617500304310ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import given, settings, strategies as st from tests.common.debug import assert_all_examples, find_any, minimal use_several_sizes = pytest.mark.parametrize("size", [1, 2, 5, 10, 100, 1000]) @use_several_sizes def test_stop_stays_within_bounds(size): assert_all_examples( st.slices(size), lambda x: x.stop is None or (x.stop >= 0 and x.stop <= size) ) @use_several_sizes def test_start_stay_within_bounds(size): assert_all_examples( st.slices(size), lambda x: x.start is None or (x.start >= 0 and x.start <= size) ) @use_several_sizes def test_step_stays_within_bounds(size): # indices -> (start, stop, step) # Stop is exclusive so we use -1 as the floor. # This uses the indices that slice produces to make this test more readable # due to how splice processes None being a little complex assert_all_examples( st.slices(size), lambda x: ( x.indices(size)[0] + x.indices(size)[2] <= size and x.indices(size)[0] + x.indices(size)[2] >= -1 ) or x.start == x.stop, ) @use_several_sizes def test_step_will_not_be_zero(size): assert_all_examples(st.slices(size), lambda x: x.step != 0) @use_several_sizes def test_slices_will_shrink(size): sliced = minimal(st.slices(size)) assert sliced.start == 0 or sliced.start is None assert sliced.stop == 0 or sliced.stop is None assert sliced.step == 1 @given(st.integers(1, 1000)) @settings(deadline=None) def test_step_will_be_negative(size): find_any(st.slices(size), lambda x: x.step < 0, settings(max_examples=10 ** 6)) @given(st.integers(1, 1000)) @settings(deadline=None) def test_step_will_be_positive(size): find_any(st.slices(size), lambda x: x.step > 0) @pytest.mark.parametrize("size", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) def test_stop_will_equal_size(size): find_any(st.slices(size), lambda x: x.stop == size, settings(max_examples=10 ** 6)) @pytest.mark.parametrize("size", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) def test_start_will_equal_size(size): 
find_any( st.slices(size), lambda x: x.start == size - 1, settings(max_examples=10 ** 6) ) @given(st.integers(1, 1000)) @settings(deadline=None) def test_start_will_equal_0(size): find_any(st.slices(size), lambda x: x.start == 0) @given(st.integers(1, 1000)) @settings(deadline=None) def test_start_will_equal_stop(size): find_any(st.slices(size), lambda x: x.start == x.stop) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_slippage.py000066400000000000000000000175431354103617500307530ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import Phase, assume, given, settings from hypothesis.database import InMemoryExampleDatabase from hypothesis.errors import Flaky, MultipleFailures from hypothesis.internal.conjecture.engine import MIN_TEST_CALLS from tests.common.utils import capture_out, flaky, non_covering_examples def test_raises_multiple_failures_with_varying_type(): target = [None] @settings(database=None) @given(st.integers()) def test(i): if abs(i) < 1000: return if target[0] is None: # Ensure that we have some space to shrink into, so we can't # trigger an minimal example and mask the other exception type. 
assume(1003 < abs(i)) target[0] = i exc_class = TypeError if target[0] == i else ValueError raise exc_class() with capture_out() as o: with pytest.raises(MultipleFailures): test() assert "TypeError" in o.getvalue() assert "ValueError" in o.getvalue() def test_raises_multiple_failures_when_position_varies(): target = [None] @given(st.integers()) def test(i): if abs(i) < 1000: return if target[0] is None: target[0] = i if target[0] == i: raise ValueError("loc 1") else: raise ValueError("loc 2") with capture_out() as o: with pytest.raises(MultipleFailures): test() assert "loc 1" in o.getvalue() assert "loc 2" in o.getvalue() def test_replays_both_failing_values(): target = [None] @settings(database=InMemoryExampleDatabase()) @given(st.integers()) def test(i): if abs(i) < 1000: return if target[0] is None: target[0] = i exc_class = TypeError if target[0] == i else ValueError raise exc_class() with pytest.raises(MultipleFailures): test() with pytest.raises(MultipleFailures): test() @pytest.mark.parametrize("fix", [TypeError, ValueError]) def test_replays_slipped_examples_once_initial_bug_is_fixed(fix): target = [] bug_fixed = False @settings(database=InMemoryExampleDatabase()) @given(st.integers()) def test(i): if abs(i) < 1000: return if not target: target.append(i) if i == target[0]: if bug_fixed and fix == TypeError: return raise TypeError() if len(target) == 1: target.append(i) if bug_fixed and fix == ValueError: return if i == target[1]: raise ValueError() with pytest.raises(MultipleFailures): test() bug_fixed = True with pytest.raises(ValueError if fix == TypeError else TypeError): test() def test_garbage_collects_the_secondary_key(): target = [] bug_fixed = False db = InMemoryExampleDatabase() @settings(database=db) @given(st.integers()) def test(i): if bug_fixed: return if abs(i) < 1000: return if not target: target.append(i) if i == target[0]: raise TypeError() if len(target) == 1: target.append(i) if i == target[1]: raise ValueError() with 
pytest.raises(MultipleFailures): test() bug_fixed = True def count(): return len(non_covering_examples(db)) prev = count() while prev > 0: test() current = count() assert current < prev prev = current def test_shrinks_both_failures(): first_has_failed = [False] duds = set() second_target = [None] @settings(database=None, max_examples=1000) @given(st.integers(min_value=0).map(int)) def test(i): if i >= 10000: first_has_failed[0] = True assert False assert i < 10000 if first_has_failed[0]: if second_target[0] is None: for j in range(10000): if j not in duds: second_target[0] = j break assert i < second_target[0] else: duds.add(i) with capture_out() as o: with pytest.raises(MultipleFailures): test() assert "test(i=10000)" in o.getvalue() assert "test(i=%d)" % (second_target[0],) in o.getvalue() def test_handles_flaky_tests_where_only_one_is_flaky(): flaky_fixed = False target = [] flaky_failed_once = [False] @settings(database=InMemoryExampleDatabase()) @given(st.integers()) def test(i): if abs(i) < 1000: return if not target: target.append(i) if i == target[0]: raise TypeError() if flaky_failed_once[0] and not flaky_fixed: return if len(target) == 1: target.append(i) if i == target[1]: flaky_failed_once[0] = True raise ValueError() with pytest.raises(Flaky): test() flaky_fixed = True with pytest.raises(MultipleFailures): test() @pytest.mark.parametrize("allow_multi", [True, False]) def test_can_disable_multiple_error_reporting(allow_multi): seen = set() @settings(database=None, derandomize=True, report_multiple_bugs=allow_multi) @given(st.integers(min_value=0)) def test(i): # We will pass on the minimal i=0, then fail with a large i, and eventually # slip to i=1 and a different error. We check both seen and raised errors. 
if i == 1: seen.add(TypeError) raise TypeError elif i >= 2: seen.add(ValueError) raise ValueError with pytest.raises(MultipleFailures if allow_multi else TypeError): test() assert seen == {TypeError, ValueError} @flaky(max_runs=3, min_passes=2) def test_finds_multiple_failures_in_generation(): # Very rarely, this raises ZeroDivisionError instead of MultipleFailure, # because we never generated NaN. We therefore allow one additional run. @settings(phases=[Phase.generate]) @given(st.lists(st.floats())) def test(x): mean = sum(x) / len(x) # ZeroDivisionError if len(x) == 1: assert mean == x[0] # x[0] is nan assert min(x) <= mean <= max(x) # x contains nan or +&- inf with pytest.raises(MultipleFailures): test() def test_stops_immediately_if_not_report_multiple_bugs(): seen = set() @settings(phases=[Phase.generate], report_multiple_bugs=False) @given(st.integers()) def test(x): seen.add(x) assert False with pytest.raises(AssertionError): test() assert len(seen) == 1 def test_stops_immediately_on_replay(): seen = set() @settings(database=InMemoryExampleDatabase()) @given(st.integers()) def test(x): seen.add(x) assert x # On the first run, we look for up to ten examples: with pytest.raises(AssertionError): test() assert 1 < len(seen) <= MIN_TEST_CALLS # With failing examples in the database, we stop at one. seen.clear() with pytest.raises(AssertionError): test() assert len(seen) == 1 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_stateful.py000066400000000000000000000714471354103617500310010ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import base64 from collections import defaultdict, namedtuple import pytest from _pytest.outcomes import Failed, Skipped from hypothesis import __version__, reproduce_failure, seed, settings as Settings from hypothesis.control import current_build_context from hypothesis.database import ExampleDatabase from hypothesis.errors import DidNotReproduce, Flaky, InvalidArgument, InvalidDefinition from hypothesis.internal.compat import print_unicode from hypothesis.stateful import ( Bundle, GenericStateMachine, RuleBasedStateMachine, consumes, initialize, invariant, multiple, precondition, rule, run_state_machine_as_test, ) from hypothesis.strategies import ( binary, booleans, integers, just, lists, none, sampled_from, tuples, ) from tests.common.utils import capture_out, checks_deprecated_behaviour, raises NO_BLOB_SETTINGS = Settings(print_blob=False) class SetStateMachine(GenericStateMachine): def __init__(self): self.elements = [] def steps(self): strat = tuples(just(False), integers(0, 5)) if self.elements: strat |= tuples(just(True), sampled_from(self.elements)) return strat def execute_step(self, step): delete, value = step if delete: self.elements.remove(value) assert value not in self.elements else: self.elements.append(value) class OrderedStateMachine(GenericStateMachine): def __init__(self): self.counter = 0 def steps(self): return integers(self.counter - 1, self.counter + 50) def execute_step(self, step): assert step >= self.counter self.counter = step Leaf = namedtuple(u"Leaf", (u"label",)) Split = namedtuple(u"Split", (u"left", u"right")) class BalancedTrees(RuleBasedStateMachine): trees = Bundle(u"BinaryTree") @rule(target=trees, x=booleans()) def leaf(self, x): return Leaf(x) 
@rule(target=trees, left=trees, right=trees) def split(self, left, right): return Split(left, right) @rule(tree=trees) def test_is_balanced(self, tree): if isinstance(tree, Leaf): return else: assert abs(self.size(tree.left) - self.size(tree.right)) <= 1 self.test_is_balanced(tree.left) self.test_is_balanced(tree.right) def size(self, tree): if isinstance(tree, Leaf): return 1 else: return 1 + self.size(tree.left) + self.size(tree.right) class DepthCharge(object): def __init__(self, value): if value is None: self.depth = 0 else: self.depth = value.depth + 1 class DepthMachine(RuleBasedStateMachine): charges = Bundle(u"charges") @rule(targets=(charges,), child=charges) def charge(self, child): return DepthCharge(child) @rule(targets=(charges,)) def none_charge(self): return DepthCharge(None) @rule(check=charges) def is_not_too_deep(self, check): assert check.depth < 3 class MultipleRulesSameFuncMachine(RuleBasedStateMachine): def myfunc(self, data): print_unicode(data) rule1 = rule(data=just(u"rule1data"))(myfunc) rule2 = rule(data=just(u"rule2data"))(myfunc) class PreconditionMachine(RuleBasedStateMachine): num = 0 @rule() def add_one(self): self.num += 1 @rule() def set_to_zero(self): self.num = 0 @rule(num=integers()) @precondition(lambda self: self.num != 0) def div_by_precondition_after(self, num): self.num = num / self.num @precondition(lambda self: self.num != 0) @rule(num=integers()) def div_by_precondition_before(self, num): self.num = num / self.num class RoseTreeStateMachine(RuleBasedStateMachine): nodes = Bundle("nodes") @rule(target=nodes, source=lists(nodes)) def bunch(self, source): return source @rule(source=nodes) def shallow(self, source): def d(ls): if not ls: return 0 else: return 1 + max(map(d, ls)) assert d(source) <= 5 class NotTheLastMachine(RuleBasedStateMachine): stuff = Bundle("stuff") def __init__(self): super(NotTheLastMachine, self).__init__() self.last = None self.bye_called = False @rule(target=stuff) def hi(self): result = object() 
self.last = result return result @precondition(lambda self: not self.bye_called) @rule(v=stuff) def bye(self, v): assert v == self.last self.bye_called = True class PopulateMultipleTargets(RuleBasedStateMachine): b1 = Bundle("b1") b2 = Bundle("b2") @rule(targets=(b1, b2)) def populate(self): return 1 @rule(x=b1, y=b2) def fail(self, x, y): assert False bad_machines = ( BalancedTrees, DepthMachine, RoseTreeStateMachine, NotTheLastMachine, PopulateMultipleTargets, ) for m in bad_machines: m.TestCase.settings = Settings(m.TestCase.settings, max_examples=1000) cheap_bad_machines = list(bad_machines) cheap_bad_machines.remove(BalancedTrees) with_cheap_bad_machines = pytest.mark.parametrize( u"machine", cheap_bad_machines, ids=[t.__name__ for t in cheap_bad_machines] ) @pytest.mark.parametrize( u"machine", bad_machines, ids=[t.__name__ for t in bad_machines] ) def test_bad_machines_fail(machine): test_class = machine.TestCase try: with capture_out() as o: with raises(AssertionError): test_class().runTest() except Exception: print_unicode(o.getvalue()) raise v = o.getvalue() print_unicode(v) steps = [l for l in v.splitlines() if "Step " in l or "state." 
in l] assert 1 <= len(steps) <= 50 def test_multiple_rules_same_func(): test_class = MultipleRulesSameFuncMachine.TestCase with capture_out() as o: test_class().runTest() output = o.getvalue() assert "rule1data" in output assert "rule2data" in output def test_can_get_test_case_off_machine_instance(): assert SetStateMachine().TestCase is SetStateMachine().TestCase assert SetStateMachine().TestCase is not None class FlakyDrawLessMachine(GenericStateMachine): def steps(self): cb = current_build_context() if cb.is_final: return binary(min_size=1, max_size=1) else: return binary(min_size=1024, max_size=1024) def execute_step(self, step): cb = current_build_context() if not cb.is_final: assert 0 not in bytearray(step) @checks_deprecated_behaviour def test_flaky_draw_less_raises_flaky(): with raises(Flaky): FlakyDrawLessMachine.TestCase().runTest() class FlakyStateMachine(GenericStateMachine): def steps(self): return just(()) def execute_step(self, step): assert current_build_context().is_final @checks_deprecated_behaviour def test_flaky_raises_flaky(): with raises(Flaky): FlakyStateMachine.TestCase().runTest() class FlakyRatchettingMachine(GenericStateMachine): ratchet = 0 def steps(self): FlakyRatchettingMachine.ratchet += 1 n = FlakyRatchettingMachine.ratchet return lists(integers(), min_size=n, max_size=n) def execute_step(self, step): assert False class MachineWithConsumingRule(RuleBasedStateMachine): b1 = Bundle("b1") b2 = Bundle("b2") def __init__(self): self.created_counter = 0 self.consumed_counter = 0 super(MachineWithConsumingRule, self).__init__() @invariant() def bundle_length(self): assert len(self.bundle("b1")) == self.created_counter - self.consumed_counter @rule(target=b1) def populate_b1(self): self.created_counter += 1 return self.created_counter @rule(target=b2, consumed=consumes(b1)) def depopulate_b1(self, consumed): self.consumed_counter += 1 return consumed @rule(consumed=lists(consumes(b1))) def depopulate_b1_multiple(self, consumed): 
self.consumed_counter += len(consumed) @rule(value1=b1, value2=b2) def check(self, value1, value2): assert value1 != value2 TestMachineWithConsumingRule = MachineWithConsumingRule.TestCase def test_multiple(): none = multiple() some = multiple(1, 2.01, "3", b"4", 5) assert len(none.values) == 0 and len(some.values) == 5 assert all(value in some.values for value in (1, 2.01, "3", b"4", 5)) class MachineUsingMultiple(RuleBasedStateMachine): b = Bundle("b") def __init__(self): self.expected_bundle_length = 0 super(MachineUsingMultiple, self).__init__() @invariant() def bundle_length(self): assert len(self.bundle("b")) == self.expected_bundle_length @rule(target=b, items=lists(elements=integers(), max_size=10)) def populate_bundle(self, items): self.expected_bundle_length += len(items) return multiple(*items) @rule(target=b) def do_not_populate(self): return multiple() TestMachineUsingMultiple = MachineUsingMultiple.TestCase def test_consumes_typecheck(): with pytest.raises(TypeError): consumes(integers()) @checks_deprecated_behaviour def test_ratchetting_raises_flaky(): with raises(Flaky): FlakyRatchettingMachine.TestCase().runTest() def test_empty_machine_is_invalid(): class EmptyMachine(RuleBasedStateMachine): pass with raises(InvalidDefinition): EmptyMachine.TestCase().runTest() def test_machine_with_no_terminals_is_invalid(): class NonTerminalMachine(RuleBasedStateMachine): @rule(value=Bundle(u"hi")) def bye(self, hi): pass with raises(InvalidDefinition): NonTerminalMachine.TestCase().runTest() class DynamicMachine(RuleBasedStateMachine): @rule(value=Bundle(u"hi")) def test_stuff(x): pass DynamicMachine.define_rule(targets=(), function=lambda self: 1, arguments={}) class IntAdder(RuleBasedStateMachine): pass IntAdder.define_rule( targets=(u"ints",), function=lambda self, x: x, arguments={u"x": integers()} ) IntAdder.define_rule( targets=(u"ints",), function=lambda self, x, y: x, arguments={u"x": integers(), u"y": Bundle(u"ints")}, ) TestDynamicMachine = 
DynamicMachine.TestCase TestIntAdder = IntAdder.TestCase TestPrecondition = PreconditionMachine.TestCase for test_case in (TestDynamicMachine, TestIntAdder, TestPrecondition): test_case.settings = Settings(test_case.settings, max_examples=10) def test_picks_up_settings_at_first_use_of_testcase(): assert TestDynamicMachine.settings.max_examples == 10 def test_new_rules_are_picked_up_before_and_after_rules_call(): class Foo(RuleBasedStateMachine): pass Foo.define_rule(targets=(), function=lambda self: 1, arguments={}) assert len(Foo.rules()) == 1 Foo.define_rule(targets=(), function=lambda self: 2, arguments={}) assert len(Foo.rules()) == 2 @checks_deprecated_behaviour def test_minimizes_errors_in_teardown(): counter = [0] class Foo(GenericStateMachine): def __init__(self): counter[0] = 0 def steps(self): return tuples() def execute_step(self, value): counter[0] += 1 def teardown(self): assert not counter[0] with raises(AssertionError): run_state_machine_as_test(Foo) assert counter[0] == 1 class RequiresInit(GenericStateMachine): def __init__(self, threshold): super(RequiresInit, self).__init__() self.threshold = threshold def steps(self): return integers() def execute_step(self, value): if value > self.threshold: raise ValueError(u"%d is too high" % (value,)) @checks_deprecated_behaviour def test_can_use_factory_for_tests(): with raises(ValueError): run_state_machine_as_test(lambda: RequiresInit(42)) @Settings(stateful_step_count=5) class FailsEventually(GenericStateMachine): def __init__(self): super(FailsEventually, self).__init__() self.counter = 0 def steps(self): return none() def execute_step(self, _): self.counter += 1 assert self.counter < 10 @checks_deprecated_behaviour def test_can_explicitly_pass_settings(): run_state_machine_as_test(FailsEventually) try: FailsEventually.TestCase.settings = Settings( FailsEventually.TestCase.settings, stateful_step_count=15 ) run_state_machine_as_test( FailsEventually, settings=Settings(stateful_step_count=2) ) finally: 
FailsEventually.TestCase.settings = Settings( FailsEventually.TestCase.settings, stateful_step_count=5 ) def test_settings_argument_is_validated(): with pytest.raises(InvalidArgument): run_state_machine_as_test(FailsEventually, settings=object()) def test_runner_that_checks_factory_produced_a_machine(): with pytest.raises(InvalidArgument): run_state_machine_as_test(object) def test_settings_attribute_is_validated(): real_settings = FailsEventually.TestCase.settings try: FailsEventually.TestCase.settings = object() with pytest.raises(InvalidArgument): run_state_machine_as_test(FailsEventually) finally: FailsEventually.TestCase.settings = real_settings @checks_deprecated_behaviour def test_saves_failing_example_in_database(): db = ExampleDatabase(":memory:") with raises(AssertionError): run_state_machine_as_test(SetStateMachine, Settings(database=db)) assert any(list(db.data.values())) @checks_deprecated_behaviour def test_can_run_with_no_db(): with raises(AssertionError): run_state_machine_as_test(SetStateMachine, Settings(database=None)) def test_stateful_double_rule_is_forbidden(recwarn): with pytest.raises(InvalidDefinition): class DoubleRuleMachine(RuleBasedStateMachine): @rule(num=just(1)) @rule(num=just(2)) def whatevs(self, num): pass def test_can_explicitly_call_functions_when_precondition_not_satisfied(): class BadPrecondition(RuleBasedStateMachine): def __init__(self): super(BadPrecondition, self).__init__() @precondition(lambda self: False) @rule() def test_blah(self): raise ValueError() @rule() def test_foo(self): self.test_blah() with pytest.raises(ValueError): run_state_machine_as_test(BadPrecondition) def test_invariant(): """If an invariant raise an exception, the exception is propagated.""" class Invariant(RuleBasedStateMachine): def __init__(self): super(Invariant, self).__init__() @invariant() def test_blah(self): raise ValueError() @rule() def do_stuff(self): pass with pytest.raises(ValueError): run_state_machine_as_test(Invariant) def 
test_no_double_invariant(): """The invariant decorator can't be applied multiple times to a single function.""" with raises(InvalidDefinition): class Invariant(RuleBasedStateMachine): def __init__(self): super(Invariant, self).__init__() @invariant() @invariant() def test_blah(self): pass @rule() def do_stuff(self): pass def test_invariant_precondition(): """If an invariant precodition isn't met, the invariant isn't run. The precondition decorator can be applied in any order. """ class Invariant(RuleBasedStateMachine): def __init__(self): super(Invariant, self).__init__() @invariant() @precondition(lambda _: False) def an_invariant(self): raise ValueError() @precondition(lambda _: False) @invariant() def another_invariant(self): raise ValueError() @rule() def do_stuff(self): pass run_state_machine_as_test(Invariant) def test_multiple_invariants(): """If multiple invariants are present, they all get run.""" class Invariant(RuleBasedStateMachine): def __init__(self): super(Invariant, self).__init__() self.first_invariant_ran = False @invariant() def invariant_1(self): self.first_invariant_ran = True @precondition(lambda self: self.first_invariant_ran) @invariant() def invariant_2(self): raise ValueError() @rule() def do_stuff(self): pass with pytest.raises(ValueError): run_state_machine_as_test(Invariant) def test_explicit_invariant_call_with_precondition(): """Invariants can be called explicitly even if their precondition is not satisfied.""" class BadPrecondition(RuleBasedStateMachine): def __init__(self): super(BadPrecondition, self).__init__() @precondition(lambda self: False) @invariant() def test_blah(self): raise ValueError() @rule() def test_foo(self): self.test_blah() with pytest.raises(ValueError): run_state_machine_as_test(BadPrecondition) def test_invariant_checks_initial_state(): """Invariants are checked before any rules run.""" class BadPrecondition(RuleBasedStateMachine): def __init__(self): super(BadPrecondition, self).__init__() self.num = 0 
@invariant() def test_blah(self): if self.num == 0: raise ValueError() @rule() def test_foo(self): self.num += 1 with pytest.raises(ValueError): run_state_machine_as_test(BadPrecondition) def test_always_runs_at_least_one_step(): class CountSteps(RuleBasedStateMachine): def __init__(self): super(CountSteps, self).__init__() self.count = 0 @rule() def do_something(self): self.count += 1 def teardown(self): assert self.count > 0 run_state_machine_as_test(CountSteps) def test_removes_needless_steps(): """Regression test from an example based on tests/nocover/test_database_agreement.py, but without the expensive bits. Comparing two database implementations in which deletion is broken, so as soon as a key/value pair is successfully deleted the test will now fail if you ever check that key. The main interesting feature of this is that it has a lot of opportunities to generate keys and values before it actually fails, but will still fail with very high probability. """ @Settings(derandomize=True) class IncorrectDeletion(RuleBasedStateMachine): def __init__(self): super(IncorrectDeletion, self).__init__() self.__saved = defaultdict(set) self.__deleted = defaultdict(set) keys = Bundle("keys") values = Bundle("values") @rule(target=keys, k=binary()) def k(self, k): return k @rule(target=values, v=binary()) def v(self, v): return v @rule(k=keys, v=values) def save(self, k, v): self.__saved[k].add(v) @rule(k=keys, v=values) def delete(self, k, v): if v in self.__saved[k]: self.__deleted[k].add(v) @rule(k=keys) def values_agree(self, k): assert not self.__deleted[k] with capture_out() as o: with pytest.raises(AssertionError): run_state_machine_as_test(IncorrectDeletion) assert o.getvalue().count(" = state.k(") == 1 assert o.getvalue().count(" = state.v(") == 1 def test_prints_equal_values_with_correct_variable_name(): class MovesBetweenBundles(RuleBasedStateMachine): b1 = Bundle("b1") b2 = Bundle("b2") @rule(target=b1) def create(self): return [] @rule(target=b2, source=b1) def 
transfer(self, source): return source @rule(source=b2) def fail(self, source): assert False with capture_out() as o: with pytest.raises(AssertionError): run_state_machine_as_test(MovesBetweenBundles) result = o.getvalue() for m in ["create", "transfer", "fail"]: assert result.count("state." + m) == 1 assert "v1 = state.create()" in result assert "v2 = state.transfer(source=v1)" in result assert "state.fail(source=v2)" in result def test_initialize_rule(): class WithInitializeRules(RuleBasedStateMachine): initialized = [] @initialize() def initialize_a(self): self.initialized.append("a") @initialize() def initialize_b(self): self.initialized.append("b") @initialize() def initialize_c(self): self.initialized.append("c") @rule() def fail_fast(self): assert False with capture_out() as o: with pytest.raises(AssertionError): run_state_machine_as_test(WithInitializeRules) assert set(WithInitializeRules.initialized[-3:]) == {"a", "b", "c"} result = o.getvalue().splitlines()[1:] assert result[0] == "state = WithInitializeRules()" # Initialize rules call order is shuffled assert {result[1], result[2], result[3]} == { "state.initialize_a()", "state.initialize_b()", "state.initialize_c()", } assert result[4] == "state.fail_fast()" assert result[5] == "state.teardown()" def test_initialize_rule_populate_bundle(): class WithInitializeBundleRules(RuleBasedStateMachine): a = Bundle("a") @initialize(target=a, dep=just("dep")) def initialize_a(self, dep): return "a v1 with (%s)" % dep @rule(param=a) def fail_fast(self, param): assert False WithInitializeBundleRules.TestCase.settings = NO_BLOB_SETTINGS with capture_out() as o: with pytest.raises(AssertionError): run_state_machine_as_test(WithInitializeBundleRules) result = o.getvalue() assert ( result == """\ Falsifying example: run_state_machine(\ factory=WithInitializeBundleRules, data=data(...)) state = WithInitializeBundleRules() v1 = state.initialize_a(dep='dep') state.fail_fast(param=v1) state.teardown() """ ) def 
test_initialize_rule_dont_mix_with_precondition(): with pytest.raises(InvalidDefinition) as exc: class BadStateMachine(RuleBasedStateMachine): @precondition(lambda self: True) @initialize() def initialize(self): pass assert "An initialization rule cannot have a precondition." in str(exc.value) # Also test decorator application in reverse order with pytest.raises(InvalidDefinition) as exc: class BadStateMachineReverseOrder(RuleBasedStateMachine): @initialize() @precondition(lambda self: True) def initialize(self): pass assert "An initialization rule cannot have a precondition." in str(exc.value) def test_initialize_rule_dont_mix_with_regular_rule(): with pytest.raises(InvalidDefinition) as exc: class BadStateMachine(RuleBasedStateMachine): @rule() @initialize() def initialize(self): pass assert "A function cannot be used for two distinct rules." in str(exc.value) def test_initialize_rule_cannot_be_double_applied(): with pytest.raises(InvalidDefinition) as exc: class BadStateMachine(RuleBasedStateMachine): @initialize() @initialize() def initialize(self): pass assert "A function cannot be used for two distinct rules." 
in str(exc.value) def test_initialize_rule_in_state_machine_with_inheritance(): class ParentStateMachine(RuleBasedStateMachine): initialized = [] @initialize() def initialize_a(self): self.initialized.append("a") class ChildStateMachine(ParentStateMachine): @initialize() def initialize_b(self): self.initialized.append("b") @rule() def fail_fast(self): assert False with capture_out() as o: with pytest.raises(AssertionError): run_state_machine_as_test(ChildStateMachine) assert set(ChildStateMachine.initialized[-2:]) == {"a", "b"} result = o.getvalue().splitlines()[1:] assert result[0] == "state = ChildStateMachine()" # Initialize rules call order is shuffled assert {result[1], result[2]} == {"state.initialize_a()", "state.initialize_b()"} assert result[3] == "state.fail_fast()" assert result[4] == "state.teardown()" def test_can_manually_call_initialize_rule(): class StateMachine(RuleBasedStateMachine): initialize_called_counter = 0 @initialize() def initialize(self): self.initialize_called_counter += 1 return self.initialize_called_counter @rule() def fail_eventually(self): assert self.initialize() <= 2 StateMachine.TestCase.settings = NO_BLOB_SETTINGS with capture_out() as o: with pytest.raises(AssertionError): run_state_machine_as_test(StateMachine) result = o.getvalue() assert ( result == """\ Falsifying example: run_state_machine(factory=StateMachine, data=data(...)) state = StateMachine() state.initialize() state.fail_eventually() state.fail_eventually() state.teardown() """ ) def test_new_initialize_rules_are_picked_up_before_and_after_rules_call(): class Foo(RuleBasedStateMachine): pass Foo.define_initialize_rule(targets=(), function=lambda self: 1, arguments={}) assert len(Foo.initialize_rules()) == 1 Foo.define_initialize_rule(targets=(), function=lambda self: 2, arguments={}) assert len(Foo.initialize_rules()) == 2 def test_steps_printed_despite_pytest_fail(capsys): # Test for https://github.com/HypothesisWorks/hypothesis/issues/1372 class 
RaisesProblem(RuleBasedStateMachine): @rule() def oops(self): pytest.fail() with pytest.raises(Failed): run_state_machine_as_test(RaisesProblem) out, _ = capsys.readouterr() assert ( """\ Falsifying example: run_state_machine(factory=RaisesProblem, data=data(...)) state = RaisesProblem() state.oops() state.teardown() """ in out ) def test_steps_not_printed_with_pytest_skip(capsys): class RaisesProblem(RuleBasedStateMachine): @rule() def skip_whole_test(self): pytest.skip() with pytest.raises(Skipped): run_state_machine_as_test(RaisesProblem) out, _ = capsys.readouterr() assert "state" not in out @checks_deprecated_behaviour def test_rule_deprecation_targets_and_target(): k, v = Bundle("k"), Bundle("v") rule(targets=(k,), target=v) @checks_deprecated_behaviour def test_rule_deprecation_bundle_by_name(): Bundle("k") rule(target="k") def test_rule_non_bundle_target(): with pytest.raises(InvalidArgument): rule(target=integers()) def test_rule_non_bundle_target_oneof(): k, v = Bundle("k"), Bundle("v") pattern = r".+ `one_of(a, b)` or `a | b` .+" with pytest.raises(InvalidArgument, match=pattern): rule(target=k | v) def test_uses_seed(capsys): @seed(0) class TrivialMachine(RuleBasedStateMachine): @rule() def oops(self): assert False with pytest.raises(AssertionError): run_state_machine_as_test(TrivialMachine) out, _ = capsys.readouterr() assert "@seed" not in out def test_reproduce_failure_works(): @reproduce_failure(__version__, base64.b64encode(b"\0\0\0")) class TrivialMachine(RuleBasedStateMachine): @rule() def oops(self): assert False with pytest.raises(AssertionError): run_state_machine_as_test(TrivialMachine) def test_reproduce_failure_fails_if_no_error(): @reproduce_failure(__version__, base64.b64encode(b"\0\0\0")) class TrivialMachine(RuleBasedStateMachine): @rule() def ok(self): assert True with pytest.raises(DidNotReproduce): run_state_machine_as_test(TrivialMachine) 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_statistical_events.py000066400000000000000000000124201354103617500330440ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import re import time import traceback import pytest from hypothesis import ( HealthCheck, assume, event, example, given, settings, strategies as st, ) from hypothesis.internal.conjecture.data import Status from hypothesis.internal.conjecture.engine import ConjectureRunner, ExitReason from hypothesis.statistics import Statistics, collector def call_for_statistics(test_function): result = [None] def callback(statistics): result[0] = statistics with collector.with_value(callback): try: test_function() except Exception: traceback.print_exc() assert result[0] is not None return result[0] def test_notes_hard_to_satisfy(): @given(st.integers()) @settings(suppress_health_check=HealthCheck.all()) def test(i): assume(i == 0) stats = call_for_statistics(test) assert "satisfied assumptions" in stats.exit_reason def test_can_callback_with_a_string(): @given(st.integers()) def test(i): event("hi") stats = call_for_statistics(test) assert any("hi" in s for s in stats.events) counter = 0 seen = [] class Foo(object): def __eq__(self, other): return True def __ne__(self, other): return False def __hash__(self): return 0 def 
__str__(self): seen.append(self) global counter counter += 1 return "COUNTER %d" % (counter,) def test_formats_are_evaluated_only_once(): global counter counter = 0 @given(st.integers()) def test(i): event(Foo()) stats = call_for_statistics(test) assert any("COUNTER 1" in s for s in stats.events) assert not any("COUNTER 2" in s for s in stats.events) def test_does_not_report_on_examples(): @example("hi") @given(st.integers()) def test(i): if isinstance(i, str): event("boo") stats = call_for_statistics(test) assert not any("boo" in e for e in stats.events) def test_exact_timing(): @settings(suppress_health_check=[HealthCheck.too_slow], deadline=None) @given(st.integers()) def test(i): time.sleep(0.5) stats = call_for_statistics(test) assert re.match(r"~ 5\d\dms", stats.runtimes) def test_apparently_instantaneous_tests(): time.freeze() @given(st.integers()) def test(i): pass stats = call_for_statistics(test) assert stats.runtimes == "< 1ms" def test_flaky_exit(): first = [True] @settings(derandomize=True) @given(st.integers()) def test(i): if i > 1001: if first[0]: first[0] = False print("Hi") assert False stats = call_for_statistics(test) assert stats.exit_reason == "test was flaky" @pytest.mark.parametrize("draw_delay", [False, True]) @pytest.mark.parametrize("test_delay", [False, True]) def test_draw_time_percentage(draw_delay, test_delay): time.freeze() @st.composite def s(draw): if draw_delay: time.sleep(0.05) @given(s()) def test(_): if test_delay: time.sleep(0.05) stats = call_for_statistics(test) if not draw_delay: assert stats.draw_time_percentage == "~ 0%" elif test_delay: assert stats.draw_time_percentage == "~ 50%" else: assert stats.draw_time_percentage == "~ 100%" def test_has_lambdas_in_output(): @given(st.integers().filter(lambda x: x % 2 == 0)) def test(i): pass stats = call_for_statistics(test) assert any("lambda x: x % 2 == 0" in e for e in stats.events) def test_stops_after_x_shrinks(monkeypatch): # the max_shrinks argument is deprecated, but we 
still stop after some # number - which we can reduce to zero to check that this works. from hypothesis.internal.conjecture import engine monkeypatch.setattr(engine, "MAX_SHRINKS", 0) @given(st.integers()) def test(n): assert n < 100 stats = call_for_statistics(test) assert "shrunk example" in stats.exit_reason @pytest.mark.parametrize("drawtime,runtime", [(1, 0), (-1, 0), (0, -1), (-1, -1)]) def test_weird_drawtime_issues(drawtime, runtime): # Regression test for #1346, where we don't have the expected relationship # 0<=drawtime<= runtime due to changing clocks or floating-point issues. engine = ConjectureRunner(lambda: None) engine.exit_reason = ExitReason.finished engine.status_runtimes[Status.VALID] = [0] engine.all_drawtimes.append(drawtime) engine.all_runtimes.extend([0, runtime]) stats = Statistics(engine) assert stats.draw_time_percentage == "NaN" hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_testdecorators.py000066400000000000000000000254131354103617500322070ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import functools import threading from collections import namedtuple import hypothesis.reporting as reporting from hypothesis import HealthCheck, Verbosity, assume, given, note, settings from hypothesis.internal.conjecture.engine import MIN_TEST_CALLS from hypothesis.strategies import ( binary, booleans, builds, data, floats, frozensets, integers, just, lists, one_of, sampled_from, sets, text, ) from tests.common.utils import capture_out, fails, fails_with, no_shrink, raises @given(integers(), integers()) def test_int_addition_is_commutative(x, y): assert x + y == y + x @fails @given(text(), text()) def test_str_addition_is_commutative(x, y): assert x + y == y + x @fails @given(binary(), binary()) def test_bytes_addition_is_commutative(x, y): assert x + y == y + x @given(integers(), integers(), integers()) def test_int_addition_is_associative(x, y, z): assert x + (y + z) == (x + y) + z @fails @given(floats(), floats(), floats()) @settings(max_examples=2000) def test_float_addition_is_associative(x, y, z): assert x + (y + z) == (x + y) + z @given(lists(integers())) def test_reversing_preserves_integer_addition(xs): assert sum(xs) == sum(reversed(xs)) def test_still_minimizes_on_non_assertion_failures(): @settings(max_examples=50) @given(integers()) def is_not_too_large(x): if x >= 10: raise ValueError("No, %s is just too large. 
Sorry" % x) with raises(ValueError) as exinfo: is_not_too_large() assert " 10 " in exinfo.value.args[0] @given(integers()) def test_integer_division_shrinks_positive_integers(n): assume(n > 0) assert n / 2 < n class TestCases(object): @given(integers()) def test_abs_non_negative(self, x): assert abs(x) >= 0 assert isinstance(self, TestCases) @given(x=integers()) def test_abs_non_negative_varargs(self, x, *args): assert abs(x) >= 0 assert isinstance(self, TestCases) @given(x=integers()) def test_abs_non_negative_varargs_kwargs(self, *args, **kw): assert abs(kw["x"]) >= 0 assert isinstance(self, TestCases) @given(x=integers()) def test_abs_non_negative_varargs_kwargs_only(*args, **kw): assert abs(kw["x"]) >= 0 assert isinstance(args[0], TestCases) @fails @given(integers()) def test_int_is_always_negative(self, x): assert x < 0 @fails @given(floats(), floats()) def test_float_addition_cancels(self, x, y): assert x + (y - x) == y @fails @given(x=integers(min_value=0, max_value=3), name=text()) def test_can_be_given_keyword_args(x, name): assume(x > 0) assert len(name) < x @fails @given(one_of(floats(), booleans()), one_of(floats(), booleans())) def test_one_of_produces_different_values(x, y): assert type(x) == type(y) @given(just(42)) def test_is_the_answer(x): assert x == 42 @fails @given(text(), text()) def test_text_addition_is_not_commutative(x, y): assert x + y == y + x @fails @given(binary(), binary()) def test_binary_addition_is_not_commutative(x, y): assert x + y == y + x @given(integers(1, 10)) def test_integers_are_in_range(x): assert 1 <= x <= 10 @given(integers(min_value=100)) def test_integers_from_are_from(x): assert x >= 100 def test_does_not_catch_interrupt_during_falsify(): calls = [0] @given(integers()) def flaky_base_exception(x): if not calls[0]: calls[0] = 1 raise KeyboardInterrupt() with raises(KeyboardInterrupt): flaky_base_exception() @given(lists(integers(), unique=True), integers()) def test_removing_an_element_from_a_unique_list(xs, y): 
assume(len(set(xs)) == len(xs)) try: xs.remove(y) except ValueError: pass assert y not in xs @fails @given(lists(integers(), min_size=2), data()) def test_removing_an_element_from_a_non_unique_list(xs, data): y = data.draw(sampled_from(xs)) xs.remove(y) assert y not in xs @given(sets(sampled_from(list(range(10))))) def test_can_test_sets_sampled_from(xs): assert all(isinstance(x, int) for x in xs) assert all(0 <= x < 10 for x in xs) mix = one_of(sampled_from([1, 2, 3]), text()) @fails @given(mix, mix) def test_can_mix_sampling_with_generating(x, y): assert type(x) == type(y) @fails @given(frozensets(integers())) def test_can_find_large_sum_frozenset(xs): assert sum(xs) < 100 def test_prints_on_failure_by_default(): @given(integers(), integers()) @settings(max_examples=100) def test_ints_are_sorted(balthazar, evans): assume(evans >= 0) assert balthazar <= evans with raises(AssertionError): with capture_out() as out: with reporting.with_reporter(reporting.default): test_ints_are_sorted() out = out.getvalue() lines = [l.strip() for l in out.split("\n")] assert "Falsifying example: test_ints_are_sorted(balthazar=1, evans=0)" in lines def test_does_not_print_on_success(): @settings(verbosity=Verbosity.normal) @given(integers()) def test_is_an_int(x): return with capture_out() as out: test_is_an_int() out = out.getvalue() lines = [l.strip() for l in out.split(u"\n")] assert all(not l for l in lines), lines @given(sampled_from([1])) def test_can_sample_from_single_element(x): assert x == 1 @fails @given(lists(integers())) def test_list_is_sorted(xs): assert sorted(xs) == xs @fails @given(floats(1.0, 2.0)) def test_is_an_endpoint(x): assert x == 1.0 or x == 2.0 def test_breaks_bounds(): @fails @given(x=integers()) def test_is_bounded(t, x): assert x < t for t in [1, 10, 100, 1000]: test_is_bounded(t) @given(x=booleans()) def test_can_test_kwargs_only_methods(**kwargs): assert isinstance(kwargs["x"], bool) @fails_with(UnicodeEncodeError) @given(text()) 
@settings(max_examples=100) def test_is_ascii(x): x.encode("ascii") @fails @given(text()) def test_is_not_ascii(x): try: x.encode("ascii") assert False except UnicodeEncodeError: pass @fails @given(text()) def test_can_find_string_with_duplicates(s): assert len(set(s)) == len(s) @fails @given(text()) def test_has_ascii(x): if not x: return ascii_characters = ( u"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ \t\n" ) assert any(c in ascii_characters for c in x) def test_can_derandomize(): values = [] @fails @given(integers()) @settings(derandomize=True, database=None) def test_blah(x): values.append(x) assert x > 0 test_blah() assert values v1 = values values = [] test_blah() assert v1 == values def test_can_run_without_database(): @given(integers()) @settings(database=None) def test_blah(x): assert False with raises(AssertionError): test_blah() def test_can_run_with_database_in_thread(): results = [] @given(integers()) def test_blah(x): raise ValueError() def run_test(): try: test_blah() except ValueError: results.append("success") # Run once in the main thread and once in another thread. Execution is # strictly serial, so no need for locking. 
run_test() assert results == ["success"] thread = threading.Thread(target=run_test) thread.start() thread.join() assert results == ["success", "success"] @given(integers()) def test_can_call_an_argument_f(f): # See issue https://github.com/HypothesisWorks/hypothesis-python/issues/38 # for details pass Litter = namedtuple("Litter", ("kitten1", "kitten2")) @given(builds(Litter, integers(), integers())) def test_named_tuples_are_of_right_type(litter): assert isinstance(litter, Litter) @fails_with(AttributeError) @given(integers().map(lambda x: x.nope)) @settings(suppress_health_check=HealthCheck.all()) def test_fails_in_reify(x): pass @given(text(u"a")) def test_a_text(x): assert set(x).issubset(set(u"a")) @given(text(u"")) def test_empty_text(x): assert not x @given(text(u"abcdefg")) def test_mixed_text(x): assert set(x).issubset(set(u"abcdefg")) def test_when_set_to_no_simplifies_runs_failing_example_twice(): failing = [0] @given(integers()) @settings(phases=no_shrink, max_examples=100, verbosity=Verbosity.normal) def foo(x): if x > 11: note("Lo") failing[0] += 1 assert False with raises(AssertionError): with capture_out() as out: foo() assert failing[0] <= MIN_TEST_CALLS + 2 assert "Falsifying example" in out.getvalue() assert "Lo" in out.getvalue() @given(integers().filter(lambda x: x % 4 == 0)) def test_filtered_values_satisfy_condition(i): assert i % 4 == 0 def nameless_const(x): def f(u, v): return u return functools.partial(f, x) @given(sets(booleans()).map(nameless_const(2))) def test_can_map_nameless(x): assert x == 2 @given(integers(0, 10).flatmap(nameless_const(just(3)))) def test_can_flatmap_nameless(x): assert x == 3 def test_can_be_used_with_none_module(): def test_is_cool(i): pass test_is_cool.__module__ = None test_is_cool = given(integers())(test_is_cool) test_is_cool() def test_does_not_print_notes_if_all_succeed(): @given(integers()) @settings(verbosity=Verbosity.normal) def test(i): note("Hi there") with capture_out() as out: with 
reporting.with_reporter(reporting.default): test() assert not out.getvalue() def test_prints_notes_once_on_failure(): @given(lists(integers())) @settings(database=None, verbosity=Verbosity.normal) def test(xs): note("Hi there") if sum(xs) <= 100: raise ValueError() with capture_out() as out: with reporting.with_reporter(reporting.default): with raises(ValueError): test() lines = out.getvalue().strip().splitlines() assert lines.count("Hi there") == 1 @given(lists(integers(), max_size=0)) def test_empty_lists(xs): assert xs == [] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_type_lookup.py000066400000000000000000000155011354103617500315110ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import enum import pytest import hypothesis.strategies as st from hypothesis import given, infer from hypothesis._strategies import _strategies from hypothesis.errors import ( HypothesisDeprecationWarning, InvalidArgument, ResolutionFailed, ) from hypothesis.internal.compat import PY2, integer_types from hypothesis.searchstrategy import types from hypothesis.searchstrategy.types import _global_type_lookup from tests.common.utils import checks_deprecated_behaviour # Build a set of all types output by core strategies blacklist = [ "builds", "iterables", "permutations", "random_module", "randoms", "runner", "sampled_from", ] types_with_core_strat = set(integer_types) for thing in ( getattr(st, name) for name in sorted(_strategies) if name in dir(st) and name not in blacklist ): for n in range(3): try: ex = thing(*([st.nothing()] * n)).example() types_with_core_strat.add(type(ex)) break except (TypeError, InvalidArgument, HypothesisDeprecationWarning): continue @pytest.mark.parametrize("typ", sorted(types_with_core_strat, key=str)) def test_resolve_core_strategies(typ): @given(st.from_type(typ)) def inner(ex): if PY2 and issubclass(typ, integer_types): assert isinstance(ex, integer_types) else: assert isinstance(ex, typ) inner() def test_lookup_knows_about_all_core_strategies(): cannot_lookup = types_with_core_strat - set(types._global_type_lookup) assert not cannot_lookup def test_lookup_keys_are_types(): with pytest.raises(InvalidArgument): st.register_type_strategy("int", st.integers()) assert "int" not in types._global_type_lookup def test_lookup_values_are_strategies(): with pytest.raises(InvalidArgument): st.register_type_strategy(int, 42) assert 42 not in types._global_type_lookup.values() @pytest.mark.parametrize("typ", sorted(types_with_core_strat, key=str)) def test_lookup_overrides_defaults(typ): sentinel = object() try: strat = types._global_type_lookup[typ] 
st.register_type_strategy(typ, st.just(sentinel)) assert st.from_type(typ).example() is sentinel finally: st.register_type_strategy(typ, strat) st.from_type.__clear_cache() assert st.from_type(typ).example() is not sentinel class ParentUnknownType(object): pass def test_can_resolve_trivial_types(): # Under Python 2, this inherits a special wrapper_descriptor slots # thing from object.__init__, which chokes inspect.getargspec. # from_type should and does work anyway; see issues #1655 and #1656. st.from_type(ParentUnknownType).example() class UnknownType(ParentUnknownType): def __init__(self, arg): pass def test_custom_type_resolution_fails_without_registering(): fails = st.from_type(UnknownType) with pytest.raises(ResolutionFailed): fails.example() def test_custom_type_resolution(): sentinel = object() try: st.register_type_strategy(UnknownType, st.just(sentinel)) assert st.from_type(UnknownType).example() is sentinel # Also covered by registration of child class assert st.from_type(ParentUnknownType).example() is sentinel finally: types._global_type_lookup.pop(UnknownType) st.from_type.__clear_cache() assert UnknownType not in types._global_type_lookup def test_custom_type_resolution_with_function(): sentinel = object() try: st.register_type_strategy(UnknownType, lambda _: st.just(sentinel)) assert st.from_type(UnknownType).example() is sentinel assert st.from_type(ParentUnknownType).example() is sentinel finally: types._global_type_lookup.pop(UnknownType) st.from_type.__clear_cache() def test_custom_type_resolution_with_function_non_strategy(): try: st.register_type_strategy(UnknownType, lambda _: None) with pytest.raises(ResolutionFailed): st.from_type(UnknownType).example() with pytest.raises(ResolutionFailed): st.from_type(ParentUnknownType).example() finally: types._global_type_lookup.pop(UnknownType) def test_errors_if_generic_resolves_empty(): try: st.register_type_strategy(UnknownType, lambda _: st.nothing()) fails_1 = st.from_type(UnknownType) with 
pytest.raises(ResolutionFailed): fails_1.example() fails_2 = st.from_type(ParentUnknownType) with pytest.raises(ResolutionFailed): fails_2.example() finally: types._global_type_lookup.pop(UnknownType) st.from_type.__clear_cache() def test_cannot_register_empty(): # Cannot register and did not register with pytest.raises(InvalidArgument): st.register_type_strategy(UnknownType, st.nothing()) fails = st.from_type(UnknownType) with pytest.raises(ResolutionFailed): fails.example() assert UnknownType not in types._global_type_lookup def test_pulic_interface_works(): st.from_type(int).example() fails = st.from_type("not a type or annotated function") with pytest.raises(InvalidArgument): fails.example() def test_given_can_infer_on_py2(): # Editing annotations before decorating is hilariously awkward, but works! def inner(a): pass inner.__annotations__ = {"a": int} given(a=infer)(inner)() class EmptyEnum(enum.Enum): pass @checks_deprecated_behaviour def test_error_if_enum_is_empty(): assert st.from_type(EmptyEnum).is_empty class BrokenClass(object): __init__ = "Hello!" def test_uninspectable_builds(): with pytest.raises(TypeError, match="object is not callable"): st.builds(BrokenClass).example() def test_uninspectable_from_type(): with pytest.raises(TypeError, match="object is not callable"): st.from_type(BrokenClass).example() @pytest.mark.parametrize( "typ", sorted([x for x in _global_type_lookup if x.__module__ != "typing"], key=str) ) @given(data=st.data()) def test_can_generate_from_all_registered_types(data, typ): value = data.draw(st.from_type(typ), label="value") assert isinstance(value, typ) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_unittest.py000066400000000000000000000034601354103617500310170ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import io import unittest import pytest from hypothesis import given, strategies as st from hypothesis.errors import FailedHealthCheck, HypothesisWarning from hypothesis.internal.compat import PY2 from tests.common.utils import fails_with class Thing_with_a_subThing(unittest.TestCase): """Example test case using subTest for the actual test below.""" @given(st.tuples(st.booleans(), st.booleans())) def thing(self, lst): for i, b in enumerate(lst): with pytest.warns(HypothesisWarning): with self.subTest((i, b)): self.assertTrue(b) def test_subTest(): suite = unittest.TestSuite() suite.addTest(Thing_with_a_subThing("thing")) stream = io.BytesIO() if PY2 else io.StringIO() out = unittest.TextTestRunner(stream=stream).run(suite) assert len(out.failures) <= out.testsRun, out class test_given_on_setUp_fails_health_check(unittest.TestCase): @fails_with(FailedHealthCheck) @given(st.integers()) def setUp(self, i): pass def test(self): """Provide something to set up for, so the setUp method is called.""" hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_validation.py000066400000000000000000000144451354103617500312770ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import functools import pytest from hypothesis import find, given from hypothesis.errors import InvalidArgument from hypothesis.internal.validation import check_type from hypothesis.strategies import ( binary, booleans, dictionaries, floats, frozensets, integers, lists, recursive, sets, text, ) from tests.common.utils import checks_deprecated_behaviour, fails_with @checks_deprecated_behaviour def test_min_size_none_behavior(): lists(integers(), min_size=None).example() @checks_deprecated_behaviour @given(lists(integers(), min_size=5.0)) def test_min_size_float_behaviour(arr): assert len(arr) >= 5 @checks_deprecated_behaviour @given(lists(integers(), max_size=5.0)) def test_max_size_float_behaviour(arr): assert len(arr) <= 5 def test_errors_when_given_varargs(): @given(integers()) def has_varargs(*args): pass with pytest.raises(InvalidArgument) as e: has_varargs() assert u"varargs" in e.value.args[0] def test_varargs_without_positional_arguments_allowed(): @given(somearg=integers()) def has_varargs(somearg, *args): pass def test_errors_when_given_varargs_and_kwargs_with_positional_arguments(): @given(integers()) def has_varargs(*args, **kw): pass with pytest.raises(InvalidArgument) as e: has_varargs() assert u"varargs" in e.value.args[0] def test_varargs_and_kwargs_without_positional_arguments_allowed(): @given(somearg=integers()) def has_varargs(*args, **kw): pass def test_bare_given_errors(): @given() def test(): pass with pytest.raises(InvalidArgument): test() def test_errors_on_unwanted_kwargs(): @given(hello=int, world=int) 
def greet(world): pass with pytest.raises(InvalidArgument): greet() def test_errors_on_too_many_positional_args(): @given(integers(), int, int) def foo(x, y): pass with pytest.raises(InvalidArgument): foo() def test_errors_on_any_varargs(): @given(integers()) def oops(*args): pass with pytest.raises(InvalidArgument): oops() def test_can_put_arguments_in_the_middle(): @given(y=integers()) def foo(x, y, z): pass foo(1, 2) def test_float_ranges(): with pytest.raises(InvalidArgument): floats(float(u"nan"), 0).example() with pytest.raises(InvalidArgument): floats(1, -1).example() def test_float_range_and_allow_nan_cannot_both_be_enabled(): with pytest.raises(InvalidArgument): floats(min_value=1, allow_nan=True).example() with pytest.raises(InvalidArgument): floats(max_value=1, allow_nan=True).example() def test_float_finite_range_and_allow_infinity_cannot_both_be_enabled(): with pytest.raises(InvalidArgument): floats(0, 1, allow_infinity=True).example() def test_does_not_error_if_min_size_is_bigger_than_default_size(): lists(integers(), min_size=50).example() sets(integers(), min_size=50).example() frozensets(integers(), min_size=50).example() lists(integers(), min_size=50, unique=True).example() def test_list_unique_and_unique_by_cannot_both_be_enabled(): @given(lists(integers(), unique=True, unique_by=lambda x: x)) def boom(t): pass with pytest.raises(InvalidArgument) as e: boom() assert "unique " in e.value.args[0] assert "unique_by" in e.value.args[0] def test_min_before_max(): with pytest.raises(InvalidArgument): integers(min_value=1, max_value=0).validate() def test_filter_validates(): with pytest.raises(InvalidArgument): integers(min_value=1, max_value=0).filter(bool).validate() def test_recursion_validates_base_case(): with pytest.raises(InvalidArgument): recursive(integers(min_value=1, max_value=0), lists).validate() def test_recursion_validates_recursive_step(): with pytest.raises(InvalidArgument): recursive(integers(), lambda x: lists(x, min_size=3, 
max_size=1)).validate() @fails_with(InvalidArgument) @given(x=integers()) def test_stuff_keyword(x=1): pass @fails_with(InvalidArgument) @given(integers()) def test_stuff_positional(x=1): pass @fails_with(InvalidArgument) @given(integers(), integers()) def test_too_many_positional(x): pass def test_given_warns_on_use_of_non_strategies(): @given(bool) def test(x): pass with pytest.raises(InvalidArgument): test() def test_given_warns_when_mixing_positional_with_keyword(): @given(booleans(), y=booleans()) def test(x, y): pass with pytest.raises(InvalidArgument): test() @checks_deprecated_behaviour def test_cannot_find_non_strategies(): with pytest.raises(InvalidArgument): find(bool, bool) @pytest.mark.parametrize( "strategy", [ functools.partial(lists, elements=integers()), functools.partial(dictionaries, keys=integers(), values=integers()), text, binary, ], ) @pytest.mark.parametrize("min_size,max_size", [(0, "10"), ("0", 10)]) def test_valid_sizes(strategy, min_size, max_size): @given(strategy(min_size=min_size, max_size=max_size)) def test(x): pass with pytest.raises(InvalidArgument): test() def test_check_type_with_tuple_of_length_two(): # This test covers logic for length-two tuples that is essential on PY2, # e.g. string_types (str, unicode) which are all length-one on Python 3. def type_checker(x): check_type((int, str), x, "x") type_checker(1) type_checker("1") with pytest.raises(InvalidArgument, match="Expected one of int, str but got "): type_checker(1.0) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/cover/test_verbosity.py000066400000000000000000000057631354103617500311760ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from contextlib import contextmanager from hypothesis import find, given from hypothesis._settings import Verbosity, settings from hypothesis.reporting import default as default_reporter, with_reporter from hypothesis.strategies import booleans, integers, lists from tests.common.debug import minimal from tests.common.utils import capture_out, checks_deprecated_behaviour, fails @contextmanager def capture_verbosity(): with capture_out() as o: with with_reporter(default_reporter): yield o def test_prints_intermediate_in_success(): with capture_verbosity() as o: @settings(verbosity=Verbosity.verbose) @given(booleans()) def test_works(x): pass test_works() assert "Trying example" in o.getvalue() def test_does_not_log_in_quiet_mode(): with capture_verbosity() as o: @fails @settings(verbosity=Verbosity.quiet) @given(integers()) def test_foo(x): assert False test_foo() assert not o.getvalue() def test_includes_progress_in_verbose_mode(): with capture_verbosity() as o: minimal( lists(integers(), min_size=1), lambda x: sum(x) >= 100, settings(verbosity=Verbosity.verbose), ) out = o.getvalue() assert out assert u"Trying example: " in out assert u"Falsifying example: " in out @checks_deprecated_behaviour def test_prints_initial_attempts_on_find(): with capture_verbosity() as o: def foo(): seen = [] def not_first(x): if not seen: seen.append(x) return False return x not in seen find(integers(), not_first, settings=settings(verbosity=Verbosity.verbose)) foo() assert u"Tried non-satisfying example" in o.getvalue() def 
test_includes_intermediate_results_in_verbose_mode(): with capture_verbosity() as o: @fails @settings(verbosity=Verbosity.verbose, database=None, derandomize=True) @given(lists(integers(), min_size=1)) def test_foo(x): assert sum(x) < 1000000 test_foo() lines = o.getvalue().splitlines() assert len([l for l in lines if u"example" in l]) > 2 assert [l for l in lines if u"AssertionError" in l] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/datetime/000077500000000000000000000000001354103617500262025ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/datetime/__init__.py000066400000000000000000000012751354103617500303200ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/datetime/test_dateutil_timezones.py000066400000000000000000000052341354103617500335270ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import datetime as dt import pytest from dateutil import tz, zoneinfo from hypothesis import assume, given from hypothesis.errors import InvalidArgument from hypothesis.extra.dateutil import timezones from hypothesis.strategies import datetimes, sampled_from, times from tests.common.debug import minimal def test_utc_is_minimal(): assert tz.UTC is minimal(timezones()) def test_can_generate_non_naive_time(): assert minimal(times(timezones=timezones()), lambda d: d.tzinfo).tzinfo == tz.UTC def test_can_generate_non_naive_datetime(): assert ( minimal(datetimes(timezones=timezones()), lambda d: d.tzinfo).tzinfo == tz.UTC ) @given(datetimes(timezones=timezones())) def test_timezone_aware_datetimes_are_timezone_aware(dt): assert dt.tzinfo is not None @given(sampled_from(["min_value", "max_value"]), datetimes(timezones=timezones())) def test_datetime_bounds_must_be_naive(name, val): with pytest.raises(InvalidArgument): datetimes(**{name: val}).validate() def test_timezones_arg_to_datetimes_must_be_search_strategy(): all_timezones = zoneinfo.get_zonefile_instance().zones with pytest.raises(InvalidArgument): datetimes(timezones=all_timezones).validate() @given(times(timezones=timezones())) def test_timezone_aware_times_are_timezone_aware(dt): assert dt.tzinfo is not None def test_can_generate_non_utc(): times(timezones=timezones()).filter( lambda d: assume(d.tzinfo) and d.tzinfo.zone != u"UTC" ).validate() @given(sampled_from(["min_value", "max_value"]), times(timezones=timezones())) def test_time_bounds_must_be_naive(name, val): with pytest.raises(InvalidArgument): times(**{name: val}).validate() def test_should_have_correct_ordering(): def offset(timezone): return 
abs(timezone.utcoffset(dt.datetime(2000, 1, 1))) next_interesting_tz = minimal(timezones(), lambda tz: offset(tz) > dt.timedelta(0)) assert offset(next_interesting_tz) == dt.timedelta(seconds=3600) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/datetime/test_pytz_timezones.py000066400000000000000000000063741354103617500327300ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import datetime as dt import pytest import pytz from hypothesis import assume, given from hypothesis.errors import InvalidArgument from hypothesis.extra.pytz import timezones from hypothesis.strategies import datetimes, sampled_from, times from tests.common.debug import assert_can_trigger_event, minimal def test_utc_is_minimal(): assert pytz.UTC is minimal(timezones()) def test_can_generate_non_naive_time(): assert minimal(times(timezones=timezones()), lambda d: d.tzinfo).tzinfo == pytz.UTC def test_can_generate_non_naive_datetime(): assert ( minimal(datetimes(timezones=timezones()), lambda d: d.tzinfo).tzinfo == pytz.UTC ) @given(datetimes(timezones=timezones())) def test_timezone_aware_datetimes_are_timezone_aware(dt): assert dt.tzinfo is not None @given(sampled_from(["min_value", "max_value"]), datetimes(timezones=timezones())) def test_datetime_bounds_must_be_naive(name, val): with pytest.raises(InvalidArgument): 
datetimes(**{name: val}).validate() def test_underflow_in_simplify(): # we shouldn't trigger a pytz bug when we're simplifying minimal( datetimes( max_value=dt.datetime.min + dt.timedelta(days=3), timezones=timezones() ), lambda x: x.tzinfo != pytz.UTC, ) def test_overflow_in_simplify(): # we shouldn't trigger a pytz bug when we're simplifying minimal( datetimes( min_value=dt.datetime.max - dt.timedelta(days=3), timezones=timezones() ), lambda x: x.tzinfo != pytz.UTC, ) def test_timezones_arg_to_datetimes_must_be_search_strategy(): with pytest.raises(InvalidArgument): datetimes(timezones=pytz.all_timezones).validate() with pytest.raises(InvalidArgument): tz = [pytz.timezone(t) for t in pytz.all_timezones] datetimes(timezones=tz).validate() @given(times(timezones=timezones())) def test_timezone_aware_times_are_timezone_aware(dt): assert dt.tzinfo is not None def test_can_generate_non_utc(): times(timezones=timezones()).filter( lambda d: assume(d.tzinfo) and d.tzinfo.zone != u"UTC" ).validate() @given(sampled_from(["min_value", "max_value"]), times(timezones=timezones())) def test_time_bounds_must_be_naive(name, val): with pytest.raises(InvalidArgument): times(**{name: val}).validate() def test_can_trigger_error_in_draw_near_max_date(): assert_can_trigger_event( datetimes( min_value=dt.datetime.max - dt.timedelta(days=3), timezones=timezones() ), lambda event: "Failed to draw a datetime" in event, ) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/000077500000000000000000000000001354103617500256505ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/__init__.py000066400000000000000000000012751354103617500277660ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/manage.py000077500000000000000000000023021354103617500274520ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os import sys from hypothesis import HealthCheck, settings from tests.common.setup import run if __name__ == u"__main__": run() settings.register_profile( "default", settings(suppress_health_check=[HealthCheck.too_slow]) ) settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default")) os.environ.setdefault(u"DJANGO_SETTINGS_MODULE", u"tests.django.toys.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toys/000077500000000000000000000000001354103617500266465ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toys/__init__.py000066400000000000000000000012751354103617500307640ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toys/settings.py000066400000000000000000000073021354103617500310620ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """Django settings for toys project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) from __future__ import absolute_import, division, print_function import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = u"o0zlv@74u4e3s+o0^h$+tlalh&$r(7hbx01g4^h5-3gizj%hub" # SECURITY WARNING: don't run with debug turned on in production! 
DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( u"django.contrib.admin", u"django.contrib.auth", u"django.contrib.contenttypes", u"django.contrib.sessions", u"django.contrib.messages", u"django.contrib.staticfiles", u"tests.django.toystore", ) MIDDLEWARE_CLASSES = ( u"django.contrib.sessions.middleware.SessionMiddleware", u"django.middleware.common.CommonMiddleware", u"django.middleware.csrf.CsrfViewMiddleware", u"django.contrib.auth.middleware.AuthenticationMiddleware", u"django.contrib.auth.middleware.SessionAuthenticationMiddleware", u"django.contrib.messages.middleware.MessageMiddleware", u"django.middleware.clickjacking.XFrameOptionsMiddleware", ) ROOT_URLCONF = u"tests.django.toys.urls" WSGI_APPLICATION = u"tests.django.toys.wsgi.application" # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { u"default": { u"ENGINE": u"django.db.backends.sqlite3", u"NAME": os.path.join(BASE_DIR, u"db.sqlite3"), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = u"en-us" TIME_ZONE = u"UTC" USE_I18N = True USE_L10N = True USE_TZ = os.environ.get("HYPOTHESIS_DJANGO_USETZ", "TRUE") == "TRUE" # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = u"/static/" # Added these bits to avoid warnings on Django 2.2 TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "OPTIONS": { "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", ] }, } ] MIDDLEWARE = [ "django.middleware.security.SecurityMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", 
"django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toys/urls.py000066400000000000000000000017731354103617500302150ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from django.conf.urls import include, url from django.contrib import admin patterns, namespace, name = admin.site.urls urlpatterns = [ # Examples: # url(r'^$', 'toys.views.home', name='home'), # url(r'^blog/', include('blog.urls')), url(r"^admin/", include((patterns, name), namespace=namespace)) ] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toys/wsgi.py000066400000000000000000000021011354103617500301630ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """WSGI config for toys project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/ """ from __future__ import absolute_import, division, print_function import os from django.core.wsgi import get_wsgi_application os.environ.setdefault(u"DJANGO_SETTINGS_MODULE", u"toys.settings") application = get_wsgi_application() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toystore/000077500000000000000000000000001354103617500275405ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toystore/__init__.py000066400000000000000000000012751354103617500316560ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toystore/admin.py000066400000000000000000000012751354103617500312070ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toystore/forms.py000066400000000000000000000142471354103617500312500ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from django import forms from django.forms import widgets from tests.django.toystore.models import ( CouldBeCharming, Customer, ManyNumerics, ManyTimes, OddFields, ) class ReprModelForm(forms.ModelForm): def __repr__(self): """I recommend putting this in your form to show the failed cases.""" return "%r\n%r" % (self.data, self.errors) class ReprForm(forms.Form): def __repr__(self): return "%r\n%r" % (self.data, self.errors) class CouldBeCharmingForm(ReprModelForm): class Meta: model = CouldBeCharming fields = "__all__" class CustomerForm(ReprModelForm): class Meta: model = Customer fields = "__all__" class ManyNumericsForm(ReprModelForm): class Meta: model = ManyNumerics fields = "__all__" class ManyTimesForm(ReprModelForm): class Meta: model = ManyTimes fields = "__all__" class OddFieldsForm(ReprModelForm): class Meta: model = OddFields fields = "__all__" class DynamicForm(ReprForm): def __init__(self, field_count=5, **kwargs): super(DynamicForm, self).__init__(**kwargs) for i in range(field_count): field_name = "field-%d" % (i,) self.fields[field_name] = forms.CharField(required=False) class BasicFieldForm(ReprForm): _boolean_required = forms.BooleanField() _boolean = forms.BooleanField(required=False) # This took me too long to figure out... The BooleanField will actually # raise a ValidationError when it recieves a value of False. Why they # didn't call it a TrueOnlyField escapes me, but *if* you actually want # to accept both True and False in your BooleanField, make sure you set # `required=False`. This behavior has been hotly contested in the bug # tracker (e.g. https://code.djangoproject.com/ticket/23547), but it # seems that since the tests and documentation are already written # this behavior is Truth. 
# see the note in the documentation # https://docs.djangoproject.com/en/dev/ref/forms/fields/#booleanfield _char_required = forms.CharField(required=True) _char = forms.CharField(required=False) _decimal = forms.DecimalField(max_digits=8, decimal_places=3) _float = forms.FloatField() _integer = forms.IntegerField() _null_boolean = forms.NullBooleanField() class TemporalFieldForm(ReprForm): _date = forms.DateField() _date_time = forms.DateTimeField() _duration = forms.DurationField() _time = forms.TimeField() _split_date_time = forms.SplitDateTimeField() class EmailFieldForm(ReprForm): _email = forms.EmailField() class SlugFieldForm(ReprForm): _slug = forms.SlugField() class URLFieldForm(ReprForm): _url = forms.URLField() class RegexFieldForm(ReprForm): _regex = forms.RegexField(regex=u"[A-Z]{3}\\.[a-z]{4}") class UUIDFieldForm(ReprForm): _uuid = forms.UUIDField() class ChoiceFieldForm(ReprForm): _choice = forms.ChoiceField( choices=(("cola", "Cola"), ("tea", "Tea"), ("water", "Water")) ) _multiple = forms.MultipleChoiceField( choices=(("cola", "Cola"), ("tea", "Tea"), ("water", "Water")) ) _typed = forms.TypedChoiceField( choices=(("1", "one"), ("2", "two"), ("3", "three"), ("4", "four")), coerce=int, empty_value=0, ) _typed_multiple = forms.TypedMultipleChoiceField( choices=(("1", "one"), ("2", "two"), ("3", "three"), ("4", "four")), coerce=int, empty_value=0, ) class InternetProtocolForm(ReprForm): _ip_both = forms.GenericIPAddressField() _ip_v4 = forms.GenericIPAddressField(protocol="IPv4") _ip_v6 = forms.GenericIPAddressField(protocol="IPv6") class BroadBooleanInput(widgets.CheckboxInput): """Basically pulled directly from the Django CheckboxInput. I added some stuff to ``values`` """ def value_from_datadict(self, data, files, name): if name not in data: return False value = data.get(name) # Translate true and false strings to boolean values. 
values = {u"true": True, u"false": False, u"0": False, u"1": True} if isinstance(value, str): value = values.get(value.lower(), value) return bool(value) class MultiCheckboxWidget(widgets.MultiWidget): def __init__(self, subfield_count=12, **kwargs): _widgets = [BroadBooleanInput()] * subfield_count super(MultiCheckboxWidget, self).__init__(_widgets, **kwargs) def decompress(self, value): values = [] for _value in value.split(u"::"): if _value in (u"0", u"", u"False", 0, None, False): values.append(False) else: values.append(True) return values class BroadBooleanField(forms.BooleanField): pass class MultiBooleanField(forms.MultiValueField): def __init__(self, subfield_count=12, **kwargs): subfields = [BroadBooleanField()] * subfield_count widget = MultiCheckboxWidget(subfield_count=subfield_count) super(MultiBooleanField, self).__init__(fields=subfields, widget=widget) def compress(self, values): return u"::".join([str(x) for x in values]) class ManyMultiValueForm(ReprForm): def __init__(self, subfield_count=12, **kwargs): super(ManyMultiValueForm, self).__init__(**kwargs) self.fields["mv_field"] = MultiBooleanField(subfield_count=subfield_count) class ShortStringForm(ReprForm): _not_too_long = forms.CharField(max_length=20, required=False) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toystore/models.py000066400000000000000000000106051354103617500313770ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from django.core.exceptions import ValidationError from django.db import models class Company(models.Model): name = models.CharField(max_length=100, unique=True) class Store(models.Model): name = models.CharField(max_length=100, unique=True) company = models.ForeignKey(Company, null=False, on_delete=models.CASCADE) class CharmField(models.Field): def db_type(self, connection): return u"char(1)" class CustomishField(models.Field): def db_type(self, connection): return u"char(1)" class Customish(models.Model): customish = CustomishField() class Customer(models.Model): name = models.CharField(max_length=100, unique=True) email = models.EmailField(max_length=100, unique=True) gender = models.CharField(max_length=50, null=True) age = models.IntegerField() birthday = models.DateTimeField() class Charming(models.Model): charm = CharmField() class CouldBeCharming(models.Model): charm = CharmField(null=True) class SelfLoop(models.Model): me = models.ForeignKey(u"self", null=True, on_delete=models.SET_NULL) class LoopA(models.Model): b = models.ForeignKey(u"LoopB", null=False, on_delete=models.CASCADE) class LoopB(models.Model): a = models.ForeignKey(u"LoopA", null=True, on_delete=models.SET_NULL) class ManyNumerics(models.Model): i1 = models.IntegerField() i2 = models.SmallIntegerField() i3 = models.BigIntegerField() p1 = models.PositiveIntegerField() p2 = models.PositiveSmallIntegerField() d = models.DecimalField(decimal_places=2, max_digits=5) class ManyTimes(models.Model): time = models.TimeField() date = models.DateField() duration = models.DurationField() class OddFields(models.Model): uuid = models.UUIDField() slug = models.SlugField() url = models.URLField() ipv4 = models.GenericIPAddressField(protocol="IPv4") ipv6 = models.GenericIPAddressField(protocol="IPv6") class 
CustomishDefault(models.Model): customish = CustomishField(default=u"b") class MandatoryComputed(models.Model): name = models.CharField(max_length=100, unique=True) company = models.ForeignKey(Company, null=False, on_delete=models.CASCADE) def __init__(self, **kw): if u"company" in kw: raise RuntimeError() cname = kw[u"name"] + u"_company" kw[u"company"] = Company.objects.create(name=cname) super(MandatoryComputed, self).__init__(**kw) def validate_even(value): if value % 2 != 0: raise ValidationError("") class RestrictedFields(models.Model): text_field_4 = models.TextField(max_length=4, blank=True) char_field_4 = models.CharField(max_length=4, blank=True) choice_field_text = models.TextField(choices=(("foo", "Foo"), ("bar", "Bar"))) choice_field_int = models.IntegerField(choices=((1, "First"), (2, "Second"))) null_choice_field_int = models.IntegerField( choices=((1, "First"), (2, "Second")), null=True, blank=True ) choice_field_grouped = models.TextField( choices=( ("Audio", (("vinyl", "Vinyl"), ("cd", "CD"))), ("Video", (("vhs", "VHS Tape"), ("dvd", "DVD"))), ("unknown", "Unknown"), ) ) even_number_field = models.IntegerField(validators=[validate_even]) non_blank_text_field = models.TextField(blank=False) class SelfModifyingField(models.IntegerField): def pre_save(self, model_instance, add): value = getattr(model_instance, self.attname) value += 1 setattr(model_instance, self.attname, value) return value class CompanyExtension(models.Model): company = models.OneToOneField(Company, primary_key=True, on_delete=models.CASCADE) self_modifying = SelfModifyingField() test_basic_configuration.py000066400000000000000000000056301354103617500351060ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toystore# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from unittest import TestCase as VanillaTestCase import pytest from django.db import IntegrityError from django.test import TestCase as DjangoTestCase from hypothesis import HealthCheck, Verbosity, given, settings from hypothesis.errors import InvalidArgument from hypothesis.extra.django import TestCase, TransactionTestCase from hypothesis.internal.compat import PYPY from hypothesis.strategies import integers from tests.django.toystore.models import Company class SomeStuff(object): @settings(suppress_health_check=[HealthCheck.too_slow]) @given(integers()) def test_is_blank_slate(self, unused): Company.objects.create(name=u"MickeyCo") def test_normal_test_1(self): Company.objects.create(name=u"MickeyCo") def test_normal_test_2(self): Company.objects.create(name=u"MickeyCo") class TestConstraintsWithTransactions(SomeStuff, TestCase): pass if not PYPY: # xfail # This is excessively slow in general, but particularly on pypy. We just # disable it altogether there as it's a niche case. 
class TestConstraintsWithoutTransactions(SomeStuff, TransactionTestCase): pass class TestWorkflow(VanillaTestCase): def test_does_not_break_later_tests(self): def break_the_db(i): Company.objects.create(name=u"MickeyCo") Company.objects.create(name=u"MickeyCo") class LocalTest(TestCase): @given(integers().map(break_the_db)) @settings( suppress_health_check=HealthCheck.all(), verbosity=Verbosity.quiet ) def test_does_not_break_other_things(self, unused): pass def test_normal_test_1(self): Company.objects.create(name=u"MickeyCo") t = LocalTest(u"test_normal_test_1") try: t.test_does_not_break_other_things() except IntegrityError: pass t.test_normal_test_1() def test_given_needs_hypothesis_test_case(self): class LocalTest(DjangoTestCase): @given(integers()) def tst(self, i): assert False, "InvalidArgument should be raised in @given" with pytest.raises(InvalidArgument): LocalTest("tst").tst() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toystore/test_given_forms.py000066400000000000000000000100111354103617500334600ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given from hypothesis.extra.django import TestCase, from_form, register_field_strategy from hypothesis.strategies import booleans, sampled_from from tests.django.toystore.forms import ( BasicFieldForm, BroadBooleanField, ChoiceFieldForm, CustomerForm, DynamicForm, EmailFieldForm, InternetProtocolForm, ManyMultiValueForm, ManyNumericsForm, ManyTimesForm, OddFieldsForm, RegexFieldForm, ShortStringForm, SlugFieldForm, TemporalFieldForm, URLFieldForm, UUIDFieldForm, ) register_field_strategy( BroadBooleanField, booleans() | sampled_from([u"1", u"0", u"True", u"False"]) ) class TestGetsBasicForms(TestCase): @given(from_form(CustomerForm)) def test_valid_customer(self, customer_form): self.assertTrue(customer_form.is_valid()) @given(from_form(ManyNumericsForm)) def test_valid_numerics(self, numerics_form): self.assertTrue(numerics_form.is_valid()) @given(from_form(ManyTimesForm)) def test_valid_times(self, times_form): self.assertTrue(times_form.is_valid()) @given(from_form(OddFieldsForm)) def test_valid_odd_fields(self, odd_form): self.assertTrue(odd_form.is_valid()) def test_dynamic_form(self): for field_count in range(2, 7): @given(from_form(DynamicForm, form_kwargs={"field_count": field_count})) def _test(dynamic_form): self.assertTrue(dynamic_form.is_valid()) _test() @given(from_form(BasicFieldForm)) def test_basic_fields_form(self, basic_field_form): self.assertTrue(basic_field_form.is_valid()) @given(from_form(TemporalFieldForm)) def test_temporal_fields_form(self, time_field_form): self.assertTrue(time_field_form.is_valid()) @given(from_form(EmailFieldForm)) def test_email_field_form(self, email_field_form): self.assertTrue(email_field_form.is_valid()) @given(from_form(SlugFieldForm)) def test_slug_field_form(self, slug_field_form): self.assertTrue(slug_field_form.is_valid()) @given(from_form(URLFieldForm)) def test_url_field_form(self, url_field_form): 
self.assertTrue(url_field_form.is_valid()) @given(from_form(RegexFieldForm)) def test_regex_field_form(self, regex_field_form): self.assertTrue(regex_field_form.is_valid()) @given(from_form(UUIDFieldForm)) def test_uuid_field_form(self, uuid_field_form): self.assertTrue(uuid_field_form.is_valid()) @given(from_form(ChoiceFieldForm)) def test_choice_fields_form(self, choice_field_form): self.assertTrue(choice_field_form.is_valid()) @given(from_form(InternetProtocolForm)) def test_ip_fields_form(self, ip_field_form): self.assertTrue(ip_field_form.is_valid()) @given(from_form(ManyMultiValueForm, form_kwargs={"subfield_count": 2})) def test_many_values_in_multi_value_field(self, many_multi_value_form): self.assertTrue(many_multi_value_form.is_valid()) @given(from_form(ManyMultiValueForm, form_kwargs={"subfield_count": 105})) def test_excessive_values_in_multi_value_field(self, excessive_form): self.assertTrue(excessive_form.is_valid()) @given(from_form(ShortStringForm)) def test_short_string_form(self, short_string_form): self.assertTrue(short_string_form.is_valid()) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toystore/test_given_models.py000066400000000000000000000203611354103617500336260ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import datetime as dt from uuid import UUID from django.conf import settings as django_settings from django.contrib.auth.models import User from hypothesis import HealthCheck, assume, given, infer, settings from hypothesis.control import reject from hypothesis.errors import HypothesisException, InvalidArgument from hypothesis.extra.django import ( TestCase, TransactionTestCase, from_model, register_field_strategy, ) from hypothesis.extra.django.models import ( add_default_field_mapping, default_value, models, ) from hypothesis.internal.compat import text_type from hypothesis.internal.conjecture.data import ConjectureData from hypothesis.strategies import binary, just, lists from tests.common.utils import checks_deprecated_behaviour from tests.django.toystore.models import ( Company, CompanyExtension, CouldBeCharming, Customer, Customish, CustomishDefault, CustomishField, MandatoryComputed, ManyNumerics, ManyTimes, OddFields, RestrictedFields, SelfLoop, Store, ) register_field_strategy(CustomishField, just(u"a")) class TestGetsBasicModels(TestCase): @checks_deprecated_behaviour def test_add_default_field_mapping_is_deprecated(self): class UnregisteredCustomishField(CustomishField): """Just to get deprecation warning when registered.""" add_default_field_mapping(UnregisteredCustomishField, just(u"a")) with self.assertRaises(InvalidArgument): # Double-registering is an error, and registry is shared. 
register_field_strategy(UnregisteredCustomishField, just(u"a")) @given(from_model(Company)) def test_is_company(self, company): self.assertIsInstance(company, Company) self.assertIsNotNone(company.pk) @given(from_model(Store, company=from_model(Company))) def test_can_get_a_store(self, store): assert store.company.pk @given(lists(from_model(Company))) def test_can_get_multiple_models_with_unique_field(self, companies): assume(len(companies) > 1) for c in companies: self.assertIsNotNone(c.pk) self.assertEqual( len({c.pk for c in companies}), len({c.name for c in companies}) ) @settings(suppress_health_check=[HealthCheck.too_slow]) @given(from_model(Customer)) def test_is_customer(self, customer): self.assertIsInstance(customer, Customer) self.assertIsNotNone(customer.pk) self.assertIsNotNone(customer.email) @settings(suppress_health_check=[HealthCheck.too_slow]) @given(from_model(Customer)) def test_tz_presence(self, customer): if django_settings.USE_TZ: self.assertIsNotNone(customer.birthday.tzinfo) else: self.assertIsNone(customer.birthday.tzinfo) @given(from_model(CouldBeCharming)) def test_is_not_charming(self, not_charming): self.assertIsInstance(not_charming, CouldBeCharming) self.assertIsNotNone(not_charming.pk) self.assertIsNone(not_charming.charm) @given(from_model(SelfLoop)) def test_sl(self, sl): self.assertIsNone(sl.me) @given(lists(from_model(ManyNumerics))) def test_no_overflow_in_integer(self, manyints): pass @given(from_model(Customish)) def test_custom_field(self, x): assert x.customish == u"a" def test_mandatory_fields_are_mandatory(self): self.assertRaises(InvalidArgument, from_model(Store).example) @checks_deprecated_behaviour def test_mandatory_fields_are_mandatory_old(self): self.assertRaises(InvalidArgument, models(Store).example) def test_mandatory_computed_fields_are_mandatory(self): with self.assertRaises(InvalidArgument): from_model(MandatoryComputed).example() @checks_deprecated_behaviour def 
test_mandatory_computed_fields_are_mandatory_old(self): with self.assertRaises(InvalidArgument): models(MandatoryComputed).example() def test_mandatory_computed_fields_may_not_be_provided(self): mc = from_model(MandatoryComputed, company=from_model(Company)) self.assertRaises(RuntimeError, mc.example) @checks_deprecated_behaviour def test_mandatory_computed_fields_may_not_be_provided_old(self): mc = models(MandatoryComputed, company=models(Company)) self.assertRaises(RuntimeError, mc.example) @checks_deprecated_behaviour @given(models(MandatoryComputed, company=default_value)) def test_mandatory_computed_field_default(self, x): assert x.company.name == x.name + u"_company" @given(from_model(CustomishDefault, customish=infer)) def test_customish_default_overridden_by_infer(self, x): assert x.customish == u"a" @given(from_model(CustomishDefault, customish=infer)) def test_customish_infer_uses_registered_instead_of_default(self, x): assert x.customish == u"a" @checks_deprecated_behaviour @given(models(CustomishDefault, customish=default_value)) def test_customish_default_generated(self, x): assert x.customish == u"b" @given(from_model(OddFields)) def test_odd_fields(self, x): assert isinstance(x.uuid, UUID) assert isinstance(x.slug, text_type) assert u" " not in x.slug assert isinstance(x.ipv4, text_type) assert len(x.ipv4.split(".")) == 4 assert all(int(i) in range(256) for i in x.ipv4.split(".")) assert isinstance(x.ipv6, text_type) assert set(x.ipv6).issubset(set(u"0123456789abcdefABCDEF:.")) @given(from_model(ManyTimes)) def test_time_fields(self, x): assert isinstance(x.time, dt.time) assert isinstance(x.date, dt.date) assert isinstance(x.duration, dt.timedelta) @given(from_model(Company)) def test_no_null_in_charfield(self, x): # regression test for #1045. Company just has a convenient CharField. 
assert u"\x00" not in x.name @given(binary(min_size=10)) def test_foreign_key_primary(self, buf): # Regression test for #1307 company_strategy = from_model(Company, name=just("test")) strategy = from_model( CompanyExtension, company=company_strategy, self_modifying=just(2) ) try: ConjectureData.for_buffer(buf).draw(strategy) except HypothesisException: reject() # Draw again with the same buffer. This will cause a duplicate # primary key. ConjectureData.for_buffer(buf).draw(strategy) assert CompanyExtension.objects.all().count() == 1 class TestsNeedingRollback(TransactionTestCase): def test_can_get_examples(self): for _ in range(200): from_model(Company).example() class TestRestrictedFields(TestCase): @given(from_model(RestrictedFields)) def test_constructs_valid_instance(self, instance): self.assertTrue(isinstance(instance, RestrictedFields)) instance.full_clean() self.assertLessEqual(len(instance.text_field_4), 4) self.assertLessEqual(len(instance.char_field_4), 4) self.assertIn(instance.choice_field_text, ("foo", "bar")) self.assertIn(instance.choice_field_int, (1, 2)) self.assertIn(instance.null_choice_field_int, (1, 2, None)) self.assertEqual( instance.choice_field_grouped, instance.choice_field_grouped.lower() ) self.assertEqual(instance.even_number_field % 2, 0) self.assertTrue(instance.non_blank_text_field) class TestValidatorInference(TestCase): @given(from_model(User)) def test_user_issue_1112_regression(self, user): assert user.username hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/django/toystore/views.py000066400000000000000000000012751354103617500312540ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/dpcontracts/000077500000000000000000000000001354103617500267325ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/dpcontracts/__init__.py000066400000000000000000000012751354103617500310500ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/dpcontracts/test_contracts.py000066400000000000000000000026141354103617500323460ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from dpcontracts import require from hypothesis import given from hypothesis.errors import InvalidArgument from hypothesis.extra.dpcontracts import fulfill from hypothesis.strategies import builds, integers def identity(x): return x @require("division is undefined for zero", lambda args: args.n != 0) def invert(n): return 1 / n @given(builds(fulfill(invert), integers())) def test_contract_filter_builds(x): assert -1 <= x <= 1 @given(integers()) def test_contract_filter_inline(n): assert -1 <= fulfill(invert)(n) <= 1 @pytest.mark.parametrize("f", [int, identity, lambda x: None]) def test_no_vacuous_fulfill(f): with pytest.raises(InvalidArgument): fulfill(f) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/lark/000077500000000000000000000000001354103617500253375ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/lark/__init__.py000066400000000000000000000012751354103617500274550ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/lark/test_grammar.py000066400000000000000000000104761354103617500304060ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import json import pytest from lark.lark import Lark from hypothesis import given from hypothesis.errors import InvalidArgument from hypothesis.extra.lark import from_lark from hypothesis.internal.compat import integer_types, text_type from hypothesis.strategies import data, just from tests.common.debug import find_any # Adapted from the official Lark tutorial, with modifications to ensure # that the generated JSON is valid. i.e. no numbers starting with ".", # \f is not ignorable whitespace, and restricted strings only. 
Source: # https://github.com/lark-parser/lark/blob/master/docs/json_tutorial.md EBNF_GRAMMAR = r""" value: dict | list | STRING | NUMBER | "true" -> true | "false" -> false | "null" -> null list : "[" [value ("," value)*] "]" dict : "{" [STRING ":" value ("," STRING ":" value)*] "}" STRING : /"[a-z]*"/ NUMBER : /-?[1-9][0-9]*(\.[0-9]+)?([eE][+-]?[0-9]+)?/ WS : /[ \t\r\n]+/ %ignore WS """ LIST_GRAMMAR = r""" list : "[" [NUMBER ("," NUMBER)*] "]" NUMBER: /[0-9]+/ """ @given(from_lark(Lark(EBNF_GRAMMAR, start="value"))) def test_generates_valid_json(string): json.loads(string) @pytest.mark.parametrize( "start, type_", [ ("dict", dict), ("list", list), ("STRING", text_type), ("NUMBER", integer_types + (float,)), ("TRUE", bool), ("FALSE", bool), ("NULL", type(None)), ], ) @given(data=data()) def test_can_specify_start_rule(data, start, type_): string = data.draw(from_lark(Lark(EBNF_GRAMMAR, start="value"), start=start)) value = json.loads(string) assert isinstance(value, type_) def test_can_generate_ignored_tokens(): list_grammar = r""" list : "[" [STRING ("," STRING)*] "]" STRING : /"[a-z]*"/ WS : /[ \t\r\n]+/ %ignore WS """ strategy = from_lark(Lark(list_grammar, start="list")) # A JSON list of strings in canoncial form which does not round-trip, # must contain ignorable whitespace in the initial string. 
find_any(strategy, lambda s: "\t" in s) def test_generation_without_whitespace(): find_any(from_lark(Lark(LIST_GRAMMAR, start="list")), lambda g: " " not in g) def test_cannot_convert_EBNF_to_strategy_directly(): with pytest.raises(InvalidArgument): # Not a Lark object from_lark(EBNF_GRAMMAR).example() with pytest.raises(TypeError): # Not even the right number of arguments from_lark(EBNF_GRAMMAR, start="value").example() with pytest.raises(InvalidArgument): # Wrong type for explicit_strategies from_lark(Lark(LIST_GRAMMAR, start="list"), explicit=[]).example() def test_undefined_terminals_require_explicit_strategies(): elem_grammar = r""" list : "[" [ELEMENT ("," ELEMENT)*] "]" %declare ELEMENT """ with pytest.raises(InvalidArgument): from_lark(Lark(elem_grammar, start="list")).example() strategy = {"ELEMENT": just("200")} from_lark(Lark(elem_grammar, start="list"), explicit=strategy).example() def test_cannot_use_explicit_strategies_for_unknown_terminals(): with pytest.raises(InvalidArgument): from_lark( Lark(LIST_GRAMMAR, start="list"), explicit={"unused_name": just("")} ).example() def test_non_string_explicit_strategies_are_invalid(): with pytest.raises(InvalidArgument): from_lark( Lark(LIST_GRAMMAR, start="list"), explicit={"NUMBER": just(0)} ).example() @given( string=from_lark(Lark(LIST_GRAMMAR, start="list"), explicit={"NUMBER": just("0")}) ) def test_can_override_defined_terminal(string): assert sum(json.loads(string)) == 0 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/000077500000000000000000000000001354103617500260615ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/__init__.py000066400000000000000000000012751354103617500301770ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_argument_validation.py000066400000000000000000000025531354103617500335330ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.strategies as st from tests.common.arguments import argument_validation_test, e BAD_ARGS = [] def adjust(ex, **kwargs): f, a, b = ex b = dict(b) b.update(kwargs) BAD_ARGS.append((f, a, b)) for ex in [ e(st.lists, st.integers()), e(st.sets, st.integers()), e(st.frozensets, st.integers()), e(st.dictionaries, st.integers(), st.integers()), e(st.text), e(st.binary), ]: adjust(ex, min_size=-1) adjust(ex, max_size=-1) adjust(ex, min_size="no") adjust(ex, max_size="no") BAD_ARGS.extend([e(st.lists, st.nothing(), unique=True, min_size=1)]) test_raise_invalid_argument = argument_validation_test(BAD_ARGS) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_bad_repr.py000066400000000000000000000041151354103617500312510ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import given from hypothesis.internal.compat import PY3 from hypothesis.internal.reflection import arg_string class BadRepr(object): def __init__(self, value): self.value = value def __repr__(self): return self.value Frosty = BadRepr("☃") def test_just_frosty(): assert repr(st.just(Frosty)) == "just(☃)" def test_sampling_snowmen(): assert repr(st.sampled_from((Frosty, "hi"))) == "sampled_from((☃, %s))" % ( repr("hi"), ) def varargs(*args, **kwargs): pass @pytest.mark.skipif(PY3, reason="Unicode repr is kosher on python 3") def test_arg_strings_are_bad_repr_safe(): assert arg_string(varargs, (Frosty,), {}) == "☃" @pytest.mark.skipif(PY3, reason="Unicode repr is kosher on python 3") def test_arg_string_kwargs_are_bad_repr_safe(): assert arg_string(varargs, (), {"x": Frosty}) == "x=☃" @given( st.sampled_from( [ "✐", "✑", "✒", "✓", "✔", "✕", "✖", "✗", "✘", "✙", "✚", "✛", "✜", "✝", "✞", "✟", "✠", "✡", "✢", "✣", ] ) ) def test_sampled_from_bad_repr(c): pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_boundary_exploration.py000066400000000000000000000030601354103617500337400ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import HealthCheck, Verbosity, given, reject, settings from hypothesis.errors import Unsatisfiable from tests.common.debug import minimal from tests.common.utils import no_shrink @pytest.mark.parametrize("strat", [st.text(min_size=5)]) @settings(phases=no_shrink, deadline=None, suppress_health_check=HealthCheck.all()) @given(st.data()) def test_explore_arbitrary_function(strat, data): cache = {} def predicate(x): try: return cache[x] except KeyError: return cache.setdefault(x, data.draw(st.booleans(), label=repr(x))) try: minimal( strat, predicate, settings=settings( max_examples=10, database=None, verbosity=Verbosity.quiet ), ) except Unsatisfiable: reject() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_cache_implementation.py000066400000000000000000000061511354103617500336450ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from collections import Counter import hypothesis.strategies as st from hypothesis.internal.cache import GenericCache from hypothesis.stateful import ( Bundle, RuleBasedStateMachine, initialize, invariant, rule, ) class CacheWithScores(GenericCache): def __init__(self, max_size): super(CacheWithScores, self).__init__(max_size) self.scores = {} def new_entry(self, key, value): return self.scores[key] class CacheRules(RuleBasedStateMachine): keys = Bundle("keys") @initialize(max_size=st.integers(1, 8)) def create_cache(self, max_size): self.cache = CacheWithScores(max_size) self.__values = {} self.__total_pins = 0 self.__pins = Counter() self.__live = set() self.__next_value = 0 self.__last_key = None def on_evict(evicted_key, value, score): assert self.__pins[evicted_key] == 0 assert score == self.cache.scores[evicted_key] assert value == self.__values[evicted_key] for k in self.cache: assert ( self.__pins[k] > 0 or self.cache.scores[k] >= score or k == self.__last_key ) self.cache.on_evict = on_evict @rule(key=st.integers(), score=st.integers(0, 100), target=keys) def new_key(self, key, score): if key not in self.cache.scores: self.cache.scores[key] = score return key @rule(key=keys) def set_key(self, key): if self.__total_pins < self.cache.max_size or key in self.cache: self.__last_key = key self.cache[key] = self.__next_value self.__values[key] = self.__next_value self.__next_value += 1 @invariant() def check_values(self): for k in getattr(self, "cache", ()): assert self.__values[k] == self.cache[k] @rule(key=keys) def pin_key(self, key): if key in self.cache: self.cache.pin(key) if self.__pins[key] == 0: self.__total_pins += 1 self.__pins[key] += 1 @rule(key=keys) def unpin_key(self, key): if self.__pins[key] > 0: self.cache.unpin(key) self.__pins[key] -= 1 if self.__pins[key] == 0: self.__total_pins -= 1 assert self.__total_pins >= 0 TestCache = CacheRules.TestCase 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_cacheable.py000066400000000000000000000036041354103617500313640ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import gc import weakref import pytest import hypothesis.strategies as st from hypothesis import given, settings @pytest.mark.parametrize( "s", [ st.floats(), st.tuples(st.integers()), st.tuples(), st.one_of(st.integers(), st.text()), ], ) def test_is_cacheable(s): assert s.is_cacheable @pytest.mark.parametrize( "s", [ st.just([]), st.tuples(st.integers(), st.just([])), st.one_of(st.integers(), st.text(), st.just([])), ], ) def test_is_not_cacheable(s): assert not s.is_cacheable def test_non_cacheable_things_are_not_cached(): x = st.just([]) assert st.tuples(x) != st.tuples(x) def test_cacheable_things_are_cached(): x = st.just(()) assert st.tuples(x) == st.tuples(x) def test_local_types_are_garbage_collected_issue_493(): store = [None] def run_locally(): class Test(object): @settings(database=None) @given(st.integers()) def test(self, i): pass store[0] = weakref.ref(Test) Test().test() run_locally() del run_locally assert store[0]() is not None gc.collect() assert store[0]() is None 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_characters.py000066400000000000000000000025211354103617500316110ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import string from hypothesis import given, strategies as st IDENTIFIER_CHARS = string.ascii_letters + string.digits + "_" @given(st.characters(blacklist_characters=IDENTIFIER_CHARS)) def test_large_blacklist(c): assert c not in IDENTIFIER_CHARS @given(st.data()) def test_arbitrary_blacklist(data): blacklist = data.draw(st.text(st.characters(max_codepoint=1000), min_size=1)) ords = list(map(ord, blacklist)) c = data.draw( st.characters( blacklist_characters=blacklist, min_codepoint=max(0, min(ords) - 1), max_codepoint=max(0, max(ords) + 1), ) ) assert c not in blacklist hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_collective_minimization.py000066400000000000000000000031041354103617500344100ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import settings from hypothesis.errors import Unsatisfiable from hypothesis.strategies import lists from tests.common import standard_types from tests.common.debug import minimal from tests.common.utils import flaky @pytest.mark.parametrize(u"spec", standard_types, ids=list(map(repr, standard_types))) @flaky(min_passes=1, max_runs=2) def test_can_collectively_minimize(spec): """This should generally exercise strategies' strictly_simpler heuristic by putting us in a state where example cloning is required to get to the answer fast enough.""" n = 10 try: xs = minimal( lists(spec, min_size=n, max_size=n), lambda x: len(set(map(repr, x))) >= 2, settings(max_examples=2000), ) assert len(xs) == n assert 2 <= len(set(map(repr, xs))) <= 3 except Unsatisfiable: pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_compat.py000066400000000000000000000070151354103617500307600ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import inspect import warnings import pytest from hypothesis import given, strategies as st from hypothesis.internal.compat import ( FullArgSpec, ceil, floor, getfullargspec, hrange, int_from_bytes, int_to_bytes, integer_types, qualname, ) def test_small_hrange(): assert list(hrange(5)) == [0, 1, 2, 3, 4] assert list(hrange(3, 5)) == [3, 4] assert list(hrange(1, 10, 2)) == [1, 3, 5, 7, 9] def test_large_hrange(): n = 1 << 1024 assert list(hrange(n, n + 5, 2)) == [n, n + 2, n + 4] assert list(hrange(n, n)) == [] with pytest.raises(ValueError): hrange(n, n, 0) class Foo: def bar(self): pass def test_qualname(): assert qualname(Foo.bar) == u"Foo.bar" assert qualname(Foo().bar) == u"Foo.bar" assert qualname(qualname) == u"qualname" def a(b, c, d): pass def b(c, d, *ar): pass def c(c, d, *ar, **k): pass def d(a1, a2=1, a3=2, a4=None): pass @pytest.mark.parametrize("f", [a, b, c, d]) def test_agrees_on_argspec(f): with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) basic = inspect.getargspec(f) full = getfullargspec(f) assert basic.args == full.args assert basic.varargs == full.varargs assert basic.keywords == full.varkw assert basic.defaults == full.defaults @given(st.binary()) def test_convert_back(bs): bs = bytearray(bs) assert int_to_bytes(int_from_bytes(bs), len(bs)) == bs bytes8 = st.builds(bytearray, st.binary(min_size=8, max_size=8)) @given(bytes8, bytes8) def test_to_int_in_big_endian_order(x, y): x, y = sorted((x, y)) assert 0 <= int_from_bytes(x) <= int_from_bytes(y) ints8 = st.integers(min_value=0, max_value=2 ** 63 - 1) @given(ints8, ints8) def test_to_bytes_in_big_endian_order(x, y): x, y = sorted((x, y)) assert int_to_bytes(x, 8) <= int_to_bytes(y, 8) @pytest.mark.skipif( not hasattr(inspect, "getfullargspec"), 
reason="inspect.getfullargspec only exists under Python 3", ) def test_inspection_compat(): assert getfullargspec is inspect.getfullargspec @pytest.mark.skipif( not hasattr(inspect, "FullArgSpec"), reason="inspect.FullArgSpec only exists under Python 3", ) def test_inspection_result_compat(): assert FullArgSpec is inspect.FullArgSpec @given(st.fractions()) def test_ceil(x): """The compat ceil function always has the Python 3 semantics. Under Python 2, math.ceil returns a float, which cannot represent large integers - for example, `float(2**53) == float(2**53 + 1)` - and this is obviously incorrect for unlimited-precision integer operations. """ assert isinstance(ceil(x), integer_types) assert x <= ceil(x) < x + 1 @given(st.fractions()) def test_floor(x): assert isinstance(floor(x), integer_types) assert x - 1 < floor(x) <= x hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_completion.py000066400000000000000000000014561354103617500316510ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given, strategies as st @given(st.data()) def test_never_draw_anything(data): pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_conjecture_engine.py000066400000000000000000000116031354103617500331610ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given, settings, strategies as st from hypothesis.database import InMemoryExampleDatabase from hypothesis.internal.compat import hbytes, hrange, int_from_bytes from hypothesis.internal.conjecture.data import ConjectureData from hypothesis.internal.conjecture.engine import ConjectureRunner from hypothesis.internal.conjecture.shrinker import Shrinker, block_program from tests.common.utils import counts_calls, non_covering_examples from tests.cover.test_conjecture_engine import run_to_buffer, shrinking_from def test_lot_of_dead_nodes(): @run_to_buffer def x(data): for i in range(4): if data.draw_bytes(1)[0] != i: data.mark_invalid() data.mark_interesting() assert x == hbytes([0, 1, 2, 3]) def test_saves_data_while_shrinking(monkeypatch): key = b"hi there" n = 5 db = InMemoryExampleDatabase() assert list(db.fetch(key)) == [] seen = set() monkeypatch.setattr( ConjectureRunner, "generate_new_examples", lambda 
runner: runner.cached_test_function([255] * 10), ) def f(data): x = data.draw_bytes(10) if sum(x) >= 2000 and len(seen) < n: seen.add(hbytes(x)) if hbytes(x) in seen: data.mark_interesting() runner = ConjectureRunner(f, settings=settings(database=db), database_key=key) runner.run() assert runner.interesting_examples assert len(seen) == n in_db = non_covering_examples(db) assert in_db.issubset(seen) assert in_db == seen def test_can_discard(monkeypatch): n = 8 monkeypatch.setattr( ConjectureRunner, "generate_new_examples", lambda runner: runner.cached_test_function( [v for i in range(n) for v in [i, i]] ), ) @run_to_buffer def x(data): seen = set() while len(seen) < n: seen.add(hbytes(data.draw_bytes(1))) data.mark_interesting() assert len(x) == n def test_regression_1(): # This is a really hard to reproduce bug that previously triggered a very # specific exception inside one of the shrink passes. It's unclear how # useful this regression test really is, but nothing else caught the # problem. @run_to_buffer def x(data): data.write(hbytes(b"\x01\x02")) data.write(hbytes(b"\x01\x00")) v = data.draw_bits(41) if v >= 512 or v == 254: data.mark_interesting() assert list(x)[:-2] == [1, 2, 1, 0, 0, 0, 0, 0] assert int_from_bytes(x[-2:]) in (254, 512) @given(st.integers(0, 255), st.integers(0, 255)) def test_cached_with_masked_byte_agrees_with_results(byte_a, byte_b): def f(data): data.draw_bits(2) runner = ConjectureRunner(f) cached_a = runner.cached_test_function(hbytes([byte_a])) cached_b = runner.cached_test_function(hbytes([byte_b])) data_b = ConjectureData.for_buffer( hbytes([byte_b]), observer=runner.tree.new_observer() ) runner.test_function(data_b) # If the cache found an old result, then it should match the real result. # If it did not, then it must be because A and B were different. assert (cached_a is cached_b) == (cached_a.buffer == data_b.buffer) def test_block_programs_fail_efficiently(monkeypatch): # Create 256 byte-sized blocks. 
None of the blocks can be deleted, and # every deletion attempt produces a different buffer. @shrinking_from(hbytes(hrange(256))) def shrinker(data): values = set() for _ in hrange(256): v = data.draw_bits(8) values.add(v) if len(values) == 256: data.mark_interesting() monkeypatch.setattr( Shrinker, "run_block_program", counts_calls(Shrinker.run_block_program) ) shrinker.fixate_shrink_passes([block_program("XX")]) assert shrinker.shrinks == 0 assert 250 <= shrinker.calls <= 260 # The block program should have been run roughly 255 times, with a little # bit of wiggle room for implementation details. # - Too many calls mean that failing steps are doing too much work. # - Too few calls mean that this test is probably miscounting and buggy. assert 250 <= Shrinker.run_block_program.calls <= 260 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_conjecture_int_list.py000066400000000000000000000044601354103617500335440ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.strategies as st from hypothesis.internal.compat import hrange from hypothesis.internal.conjecture.junkdrawer import IntList from hypothesis.stateful import RuleBasedStateMachine, initialize, invariant, rule INTEGERS = st.integers(0, 2 ** 68) @st.composite def valid_index(draw): machine = draw(st.runner()) if not machine.model: return draw(st.nothing()) return draw(st.integers(0, len(machine.model) - 1)) @st.composite def valid_slice(draw): machine = draw(st.runner()) result = [ draw(st.integers(0, max(3, len(machine.model) * 2 - 1))) for _ in hrange(2) ] result.sort() return slice(*result) class IntListRules(RuleBasedStateMachine): @initialize(ls=st.lists(INTEGERS)) def starting_lists(self, ls): self.model = list(ls) self.target = IntList(ls) @invariant() def lists_are_equivalent(self): if hasattr(self, "model"): assert isinstance(self.model, list) assert isinstance(self.target, IntList) assert len(self.model) == len(self.target) assert list(self.target) == self.model @rule(n=INTEGERS) def append(self, n): self.model.append(n) self.target.append(n) @rule(i=valid_index() | valid_slice()) def delete(self, i): del self.model[i] del self.target[i] @rule(sl=valid_slice()) def slice(self, sl): self.model = self.model[sl] self.target = self.target[sl] @rule(i=valid_index()) def agree_on_values(self, i): assert self.model[i] == self.target[i] TestIntList = IntListRules.TestCase hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_conjecture_utils.py000066400000000000000000000030441354103617500330540ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from fractions import Fraction import hypothesis.internal.conjecture.utils as cu from hypothesis.internal.compat import int_to_bytes from hypothesis.internal.conjecture.data import ConjectureData, StopTest def test_gives_the_correct_probabilities(): weights = [Fraction(1), Fraction(9)] total = sum(weights) probabilities = [w / total for w in weights] sampler = cu.Sampler(probabilities) assert cu.Sampler(weights).table == sampler.table counts = [0] * len(weights) i = 0 while i < 2 ** 16: data = ConjectureData.for_buffer(int_to_bytes(i, 2)) try: c = sampler.sample(data) counts[c] += 1 assert probabilities[c] >= Fraction(counts[c], 2 ** 16) except StopTest: pass if 1 in data.forced_indices: i += 256 else: i += 1 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_conventions.py000066400000000000000000000015411354103617500320400ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.utils.conventions import UniqueIdentifier def test_unique_identifier_repr(): assert repr(UniqueIdentifier(u"hello_world")) == u"hello_world" hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_database_agreement.py000066400000000000000000000044421354103617500332710ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os import shutil import tempfile import hypothesis.strategies as st from hypothesis.database import DirectoryBasedExampleDatabase, InMemoryExampleDatabase from hypothesis.stateful import Bundle, RuleBasedStateMachine, rule class DatabaseComparison(RuleBasedStateMachine): def __init__(self): super(DatabaseComparison, self).__init__() self.tempd = tempfile.mkdtemp() exampledir = os.path.join(self.tempd, "examples") self.dbs = [ DirectoryBasedExampleDatabase(exampledir), InMemoryExampleDatabase(), DirectoryBasedExampleDatabase(exampledir), ] keys = Bundle("keys") values = Bundle("values") @rule(target=keys, k=st.binary()) def k(self, k): return k @rule(target=values, v=st.binary()) def v(self, v): return v @rule(k=keys, v=values) def save(self, k, v): for db in self.dbs: db.save(k, v) @rule(k=keys, v=values) def delete(self, k, v): for db in self.dbs: db.delete(k, v) @rule(k1=keys, k2=keys, v=values) def move(self, k1, k2, v): for db in self.dbs: db.move(k1, k2, v) @rule(k=keys) def values_agree(self, k): last = None last_db = None for db in self.dbs: keys = set(db.fetch(k)) if last is not None: assert last == keys, (last_db, db) last = keys last_db = db def teardown(self): for d in self.dbs: d.close() shutil.rmtree(self.tempd) TestDBs = DatabaseComparison.TestCase hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_database_usage.py000066400000000000000000000075351354103617500324340ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.strategies as st from hypothesis import assume, core, find, given, settings from hypothesis.database import InMemoryExampleDatabase from hypothesis.errors import NoSuchExample, Unsatisfiable from hypothesis.internal.compat import hbytes from tests.common.utils import ( all_values, checks_deprecated_behaviour, non_covering_examples, ) def has_a_non_zero_byte(x): return any(hbytes(x)) @checks_deprecated_behaviour def test_saves_incremental_steps_in_database(): key = b"a database key" database = InMemoryExampleDatabase() find( st.binary(min_size=10), lambda x: has_a_non_zero_byte(x), settings=settings(database=database), database_key=key, ) assert len(all_values(database)) > 1 @checks_deprecated_behaviour def test_clears_out_database_as_things_get_boring(): key = b"a database key" database = InMemoryExampleDatabase() do_we_care = True def stuff(): try: find( st.binary(min_size=50), lambda x: do_we_care and has_a_non_zero_byte(x), settings=settings(database=database, max_examples=10), database_key=key, ) except NoSuchExample: pass stuff() assert len(non_covering_examples(database)) > 1 do_we_care = False stuff() initial = len(non_covering_examples(database)) assert initial > 0 for _ in range(initial): stuff() keys = len(non_covering_examples(database)) if not keys: break else: assert False @checks_deprecated_behaviour def test_trashes_invalid_examples(): key = b"a database key" database = InMemoryExampleDatabase() finicky = False def stuff(): try: find( st.binary(min_size=100), lambda x: assume(not finicky) and has_a_non_zero_byte(x), settings=settings(database=database), database_key=key, ) except Unsatisfiable: pass stuff() original = len(all_values(database)) assert 
original > 1 finicky = True stuff() assert len(all_values(database)) < original @checks_deprecated_behaviour def test_respects_max_examples_in_database_usage(): key = b"a database key" database = InMemoryExampleDatabase() do_we_care = True counter = [0] def check(x): counter[0] += 1 return do_we_care and has_a_non_zero_byte(x) def stuff(): try: find( st.binary(min_size=100), check, settings=settings(database=database, max_examples=10), database_key=key, ) except NoSuchExample: pass stuff() assert len(all_values(database)) > 10 do_we_care = False counter[0] = 0 stuff() assert counter == [10] def test_does_not_use_database_when_seed_is_forced(monkeypatch): monkeypatch.setattr(core, "global_force_seed", 42) database = InMemoryExampleDatabase() database.fetch = None @settings(database=database) @given(st.integers()) def test(i): pass test() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_deferred_errors.py000066400000000000000000000041171354103617500326510ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import find, given from hypothesis._strategies import defines_strategy from hypothesis.errors import InvalidArgument from tests.common.utils import checks_deprecated_behaviour def test_does_not_error_on_initial_calculation(): st.floats(max_value=float("nan")) st.sampled_from([]) st.lists(st.integers(), min_size=5, max_size=2) st.floats(min_value=2.0, max_value=1.0) def test_errors_each_time(): s = st.integers(max_value=1, min_value=3) with pytest.raises(InvalidArgument): s.example() with pytest.raises(InvalidArgument): s.example() def test_errors_on_test_invocation(): @given(st.integers(max_value=1, min_value=3)) def test(x): pass with pytest.raises(InvalidArgument): test() @checks_deprecated_behaviour def test_errors_on_find(): s = st.lists(st.integers(), min_size=5, max_size=2) with pytest.raises(InvalidArgument): find(s, lambda x: True) def test_errors_on_example(): s = st.floats(min_value=2.0, max_value=1.0) with pytest.raises(InvalidArgument): s.example() def test_does_not_recalculate_the_strategy(): calls = [0] @defines_strategy def foo(): calls[0] += 1 return st.just(1) f = foo() assert calls == [0] f.example() assert calls == [1] f.example() assert calls == [1] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_duplication.py000066400000000000000000000041461354103617500320120ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from collections import Counter import pytest from hypothesis import given, settings from hypothesis.searchstrategy import SearchStrategy class Blocks(SearchStrategy): def __init__(self, n): self.n = n def do_draw(self, data): return data.draw_bytes(self.n) @pytest.mark.parametrize("n", range(1, 5)) def test_does_not_duplicate_blocks(n): counts = Counter() @given(Blocks(n)) @settings(database=None) def test(b): counts[b] += 1 test() assert set(counts.values()) == {1} @pytest.mark.parametrize("n", range(1, 5)) def test_mostly_does_not_duplicate_blocks_even_when_failing(n): counts = Counter() @settings(database=None) @given(Blocks(n)) def test(b): counts[b] += 1 if len(counts) > 3: raise ValueError() try: test() except ValueError: pass # There are two circumstances in which a duplicate is allowed: We replay # the failing test once to check for flakiness, and then we replay the # fully minimized failing test at the end to display the error. The # complication comes from the fact that these may or may not be the same # test case, so we can see either two test cases each run twice or one # test case which has been run three times. seen_counts = set(counts.values()) assert seen_counts in ({1, 2}, {1, 3}) assert len([k for k, v in counts.items() if v > 1]) <= 2 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_dynamic_variable.py000066400000000000000000000021601354103617500327620ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.utils.dynamicvariables import DynamicVariable def test_can_assign(): d = DynamicVariable(1) assert d.value == 1 with d.with_value(2): assert d.value == 2 assert d.value == 1 def test_can_nest(): d = DynamicVariable(1) with d.with_value(2): assert d.value == 2 with d.with_value(3): assert d.value == 3 assert d.value == 2 assert d.value == 1 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_emails.py000066400000000000000000000017021354103617500307440ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given from hypothesis.strategies import emails @given(emails()) def test_is_valid_email(address): local, at_, domain = address.rpartition("@") assert len(address) <= 254 assert at_ == "@" assert local assert domain hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_eval_as_source.py000066400000000000000000000023611354103617500324660ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.internal.reflection import source_exec_as_module def test_can_eval_as_source(): assert source_exec_as_module("foo=1").foo == 1 def test_caches(): x = source_exec_as_module("foo=2") y = source_exec_as_module("foo=2") assert x is y RECURSIVE = """ from hypothesis.internal.reflection import source_exec_as_module def test_recurse(): assert not ( source_exec_as_module("too_much_recursion = False").too_much_recursion) """ def test_can_call_self_recursively(): source_exec_as_module(RECURSIVE).test_recurse() test_explore_arbitrary_languages.py000066400000000000000000000101031354103617500351710ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import random import attr import pytest import hypothesis.internal.escalation as esc import hypothesis.strategies as st from hypothesis import HealthCheck, Phase, Verbosity, assume, given, note, settings from hypothesis.internal.compat import hbytes from hypothesis.internal.conjecture.data import Status from hypothesis.internal.conjecture.engine import ConjectureRunner def setup_module(module): esc.PREVENT_ESCALATION = True def teardown_module(module): esc.PREVENT_ESCALATION = False @attr.s() class Write(object): value = attr.ib() child = attr.ib() @attr.s() class Branch(object): bits = attr.ib() children = attr.ib(default=attr.Factory(dict)) @attr.s() class Terminal(object): status = attr.ib() payload = attr.ib(default=None) nodes = st.deferred(lambda: terminals | writes | branches) # Does not include Status.OVERFLOW by design: That happens because of the size # of the string, not the input language. terminals = st.one_of( st.just(Terminal(Status.VALID)), st.just(Terminal(Status.INVALID)), st.builds(Terminal, status=st.just(Status.INTERESTING), payload=st.integers(0, 10)), ) branches = st.builds(Branch, bits=st.integers(1, 64)) writes = st.builds(Write, value=st.binary(min_size=1), child=nodes) def run_language_test_for(root, data, seed): random.seed(seed) def test(local_data): node = root while not isinstance(node, Terminal): if isinstance(node, Write): local_data.write(hbytes(node.value)) node = node.child else: assert isinstance(node, Branch) c = local_data.draw_bits(node.bits) try: node = node.children[c] except KeyError: if data is None: return node = node.children.setdefault(c, data.draw(nodes)) assert isinstance(node, Terminal) if node.status == Status.INTERESTING: local_data.mark_interesting(node.payload) elif node.status == Status.INVALID: local_data.mark_invalid() runner = ConjectureRunner( test, settings=settings( max_examples=1, database=None, 
suppress_health_check=HealthCheck.all(), verbosity=Verbosity.quiet, phases=list(Phase), ), ) try: runner.run() finally: if data is not None: note(root) assume(runner.interesting_examples) @settings( suppress_health_check=HealthCheck.all(), deadline=None, phases=set(Phase) - {Phase.shrink}, ) @given(st.data()) def test_explore_an_arbitrary_language(data): root = data.draw(writes | branches) seed = data.draw(st.integers(0, 2 ** 64 - 1)) run_language_test_for(root, data, seed) @pytest.mark.parametrize("seed, language", []) def test_run_specific_example(seed, language): """This test recreates individual languages generated with the main test. These are typically manually pruned down a bit - e.g. it's OK to remove VALID nodes because KeyError is treated as if it lead to one in this test (but not in the @given test). These tests are likely to be fairly fragile with respect to changes in the underlying engine. Feel free to delete examples if they start failing after a change. """ run_language_test_for(language, None, seed) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_fancy_repr.py000066400000000000000000000032401354103617500316210ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.strategies as st def test_floats_is_floats(): assert repr(st.floats()) == u"floats()" def test_includes_non_default_values(): assert repr(st.floats(max_value=1.0)) == u"floats(max_value=1.0)" def foo(*args, **kwargs): pass # fmt: off # The linebreaks here can force our lambda repr code into specific paths, # so we tell Black to leave them as-is. def test_builds_repr(): assert repr(st.builds(foo, st.just(1), x=st.just(10))) == \ u'builds(foo, just(1), x=just(10))' def test_map_repr(): assert repr(st.integers().map(abs)) == u'integers().map(abs)' assert repr(st.integers().map(lambda x: x * 2)) == \ u'integers().map(lambda x: x * 2)' def test_filter_repr(): assert repr(st.integers().filter(lambda x: x != 3)) == \ u'integers().filter(lambda x: x != 3)' def test_flatmap_repr(): assert repr(st.integers().flatmap(lambda x: st.booleans())) == \ u'integers().flatmap(lambda x: st.booleans())' hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_filtering.py000066400000000000000000000045631354103617500314650ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import given from hypothesis.internal.compat import hrange from hypothesis.strategies import integers, lists @pytest.mark.parametrize( (u"specifier", u"condition"), [(integers(), lambda x: x > 1), (lists(integers()), bool)], ) def test_filter_correctly(specifier, condition): @given(specifier.filter(condition)) def test_is_filtered(x): assert condition(x) test_is_filtered() # A variety of strategies that generate the integers 1-20 inclusive, but might # differ in their support for special-case filtering. one_to_twenty_strategies = [ st.integers(1, 20), st.integers(0, 19).map(lambda x: x + 1), st.sampled_from(hrange(1, 21)), st.sampled_from(hrange(0, 20)).map(lambda x: x + 1), ] @pytest.mark.parametrize("base", one_to_twenty_strategies) @given( data=st.data(), forbidden_values=st.lists(st.integers(1, 20), max_size=19, unique=True), ) def test_chained_filters_agree(data, forbidden_values, base): def forbid(s, forbidden): """Helper function to avoid Python variable scoping issues.""" return s.filter(lambda x: x != forbidden) s = base for forbidden in forbidden_values: s = forbid(s, forbidden) x = data.draw(s) assert 1 <= x <= 20 assert x not in forbidden_values @pytest.mark.parametrize("base", one_to_twenty_strategies) def test_chained_filters_repr(base): def foo(x): return x != 0 def bar(x): return x != 2 filtered = base.filter(foo) chained = filtered.filter(bar) assert repr(chained) == "%r.filter(foo).filter(bar)" % (base,) assert repr(filtered) == "%r.filter(foo)" % (base,) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_find.py000066400000000000000000000043521354103617500304160ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import math import pytest from hypothesis import find, settings as Settings from hypothesis.errors import NoSuchExample from hypothesis.strategies import booleans, dictionaries, floats, integers, lists from tests.common.debug import minimal from tests.common.utils import checks_deprecated_behaviour def test_can_find_an_int(): assert minimal(integers(), lambda x: True) == 0 assert minimal(integers(), lambda x: x >= 13) == 13 def test_can_find_list(): x = minimal(lists(integers()), lambda x: sum(x) >= 10) assert sum(x) == 10 def test_can_find_nan(): minimal(floats(), math.isnan) def test_can_find_nans(): x = minimal(lists(floats()), lambda x: math.isnan(sum(x))) if len(x) == 1: assert math.isnan(x[0]) else: assert 2 <= len(x) <= 3 @checks_deprecated_behaviour def test_condition_is_name(): settings = Settings(max_examples=20) with pytest.raises(NoSuchExample) as e: find(booleans(), lambda x: False, settings=settings) assert "lambda x:" in e.value.args[0] with pytest.raises(NoSuchExample) as e: find(integers(), lambda x: "☃" in str(x), settings=settings) assert "lambda x:" in e.value.args[0] def bad(x): return False with pytest.raises(NoSuchExample) as e: find(integers(), bad, settings=settings) assert "bad" in e.value.args[0] def test_find_dictionary(): smallest = minimal( dictionaries(keys=integers(), values=integers()), lambda xs: any(kv[0] > kv[1] for kv in xs.items()), ) assert len(smallest) == 1 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_fixtures.py000066400000000000000000000016511354103617500313460ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import time from tests.common import TIME_INCREMENT def test_time_consistently_increments_in_tests(): x = time.time() y = time.time() z = time.time() assert y == x + TIME_INCREMENT assert z == y + TIME_INCREMENT hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_flatmap.py000066400000000000000000000062171354103617500311240ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import assume, given, settings from hypothesis.database import ExampleDatabase from hypothesis.internal.compat import Counter from hypothesis.strategies import ( booleans, builds, floats, integers, just, lists, text, tuples, ) from tests.common.debug import minimal ConstantLists = integers().flatmap(lambda i: lists(just(i))) OrderedPairs = integers(1, 200).flatmap(lambda e: tuples(integers(0, e - 1), just(e))) @settings(max_examples=100) @given(ConstantLists) def test_constant_lists_are_constant(x): assume(len(x) >= 3) assert len(set(x)) == 1 @settings(max_examples=100) @given(OrderedPairs) def test_in_order(x): assert x[0] < x[1] def test_flatmap_retrieve_from_db(): constant_float_lists = floats(0, 1).flatmap(lambda x: lists(just(x))) track = [] db = ExampleDatabase() @given(constant_float_lists) @settings(database=db) def record_and_test_size(xs): if sum(xs) >= 1: track.append(xs) assert False with pytest.raises(AssertionError): record_and_test_size() assert track example = track[-1] track = [] with pytest.raises(AssertionError): record_and_test_size() assert track[0] == example def test_flatmap_does_not_reuse_strategies(): s = builds(list).flatmap(just) assert s.example() is not s.example() def test_flatmap_has_original_strategy_repr(): ints = integers() ints_up = ints.flatmap(lambda n: integers(min_value=n)) assert repr(ints) in repr(ints_up) def test_mixed_list_flatmap(): s = lists(booleans().flatmap(lambda b: booleans() if b else text())) def criterion(ls): c = Counter(type(l) for l in ls) return len(c) >= 2 and min(c.values()) >= 3 result = minimal(s, criterion) assert len(result) == 6 assert set(result) == {False, u""} @pytest.mark.parametrize("n", range(1, 10)) def test_can_shrink_through_a_binding(n): bool_lists = integers(0, 100).flatmap( lambda k: lists(booleans(), min_size=k, max_size=k) ) assert minimal(bool_lists, lambda x: x.count(True) >= 
n) == [True] * n @pytest.mark.parametrize("n", range(1, 10)) def test_can_delete_in_middle_of_a_binding(n): bool_lists = integers(1, 100).flatmap( lambda k: lists(booleans(), min_size=k, max_size=k) ) result = minimal(bool_lists, lambda x: x[0] and x[-1] and x.count(False) >= n) assert result == [True] + [False] * n + [True] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_floating.py000066400000000000000000000065451354103617500313070ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER """Tests for being able to generate weird and wonderful floating point numbers.""" from __future__ import absolute_import, division, print_function import math import sys from hypothesis import HealthCheck, assume, given, settings from hypothesis.strategies import data, floats, lists from tests.common.utils import fails TRY_HARDER = settings( max_examples=1000, suppress_health_check=[HealthCheck.filter_too_much] ) @given(floats()) @TRY_HARDER def test_is_float(x): assert isinstance(x, float) @fails @given(floats()) @TRY_HARDER def test_inversion_is_imperfect(x): assume(x != 0.0) y = 1.0 / x assert x * y == 1.0 @given(floats(-sys.float_info.max, sys.float_info.max)) def test_largest_range(x): assert not math.isinf(x) @given(floats()) @TRY_HARDER def test_negation_is_self_inverse(x): assume(not math.isnan(x)) y = -x assert -y == x @fails @given(lists(floats())) def test_is_not_nan(xs): assert not any(math.isnan(x) for x in xs) @fails @given(floats()) @TRY_HARDER def test_is_not_positive_infinite(x): assume(x > 0) assert not math.isinf(x) @fails @given(floats()) @TRY_HARDER def test_is_not_negative_infinite(x): assume(x < 0) assert not math.isinf(x) @fails @given(floats()) @TRY_HARDER def test_is_int(x): assume(not (math.isinf(x) or math.isnan(x))) assert x == int(x) @fails @given(floats()) @TRY_HARDER def test_is_not_int(x): assume(not (math.isinf(x) or math.isnan(x))) assert x != int(x) @fails @given(floats()) @TRY_HARDER def test_is_in_exact_int_range(x): assume(not (math.isinf(x) or math.isnan(x))) assert x + 1 != x # Tests whether we can represent subnormal floating point numbers. # This is essentially a function of how the python interpreter # was compiled. 
# Everything is terrible if math.ldexp(0.25, -1022) > 0: REALLY_SMALL_FLOAT = sys.float_info.min else: REALLY_SMALL_FLOAT = sys.float_info.min * 2 @fails @given(floats()) @TRY_HARDER def test_can_generate_really_small_positive_floats(x): assume(x > 0) assert x >= REALLY_SMALL_FLOAT @fails @given(floats()) @TRY_HARDER def test_can_generate_really_small_negative_floats(x): assume(x < 0) assert x <= -REALLY_SMALL_FLOAT @fails @given(floats()) @TRY_HARDER def test_can_find_floats_that_do_not_round_trip_through_strings(x): assert float(str(x)) == x @fails @given(floats()) @TRY_HARDER def test_can_find_floats_that_do_not_round_trip_through_reprs(x): assert float(repr(x)) == x finite_floats = floats(allow_infinity=False, allow_nan=False) @settings(deadline=None) @given(finite_floats, finite_floats, data()) def test_floats_are_in_range(x, y, data): x, y = sorted((x, y)) assume(x < y) t = data.draw(floats(x, y)) assert x <= t <= y hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_from_type_recipe.py000066400000000000000000000027741354103617500330370ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
def everything_except(excluded_types):
    """Recipe copied from the docstring of ``from_type``"""
    anything = st.from_type(type).flatmap(st.from_type)
    return anything.filter(lambda value: not isinstance(value, excluded_types))
def test_raises_unsatisfiable_if_all_false():
    """If every example is rejected, running the test raises Unsatisfiable."""

    @given(integers())
    @settings(max_examples=50, suppress_health_check=HealthCheck.all())
    def rejects_everything(x):
        reject()

    with pytest.raises(Unsatisfiable):
        rejects_everything()
def test_fail_independently():
    """Two tests sharing one given(...) decorator must not share outcomes."""

    @given_named_booleans
    def failing_case(z):
        assert False

    @given_named_booleans
    def passing_case(z):
        pass

    # The failing test raises, but that must not poison the passing one.
    with pytest.raises(AssertionError):
        failing_case()
    passing_case()
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import given from hypothesis.internal.conjecture.utils import integer_range from hypothesis.searchstrategy.strategies import SearchStrategy from hypothesis.strategies import integers from tests.common.debug import minimal class interval(SearchStrategy): def __init__(self, lower, upper, center=None): self.lower = lower self.upper = upper self.center = center def do_draw(self, data): return integer_range(data, self.lower, self.upper, center=self.center) @pytest.mark.parametrize("inter", [(0, 5, 10), (-10, 10, 10), (0, 1, 1), (1, 1, 2)]) def test_intervals_shrink_to_center(inter): lower, center, upper = inter s = interval(lower, upper, center) assert minimal(s, lambda x: True) == center if lower < center: assert minimal(s, lambda x: x < center) == center - 1 if center < upper: assert minimal(s, lambda x: x > center) == center + 1 def test_bounded_integers_distribution_of_bit_width_issue_1387_regression(): values = [] @given(integers(0, 1e100)) def test(x): values.append(x) test() # We draw from a shaped distribution up to 128bit ~7/8 of the time, and # uniformly the rest. So we should get some very large but not too many. 
def test_label_of_deferred_strategy_is_well_defined():
    """Computing .label on a self-referential deferred strategy terminates."""
    strategy = st.deferred(lambda: st.lists(strategy))
    # Merely evaluating the attribute is the test: it must not hang or raise.
    strategy.label
def test_can_generate_large_lists_with_min_size():
    """A list strategy with a 400-element minimum is still generatable."""
    big_lists = st.lists(st.integers(), min_size=400)
    find_any(big_lists)
def test_max_examples_are_respected():
    """The inner test body runs exactly max_examples times when it never fails."""
    # Single-element list so the closure can mutate it (py2-compatible
    # alternative to ``nonlocal``).
    calls = [0]

    @given(st.random_module(), st.integers())
    @settings(max_examples=100)
    def test(rnd, i):
        calls[0] = calls[0] + 1

    test()
    assert calls == [100]
""" @given(st.integers()) def test_can_replace_inner_test(x): assert False, "This should be replaced" test_can_replace_inner_test.hypothesis.inner_test = always_passes def decorator(func): """An example of a common decorator pattern.""" @wraps(func) def inner(*args, **kwargs): return func(*args, **kwargs) return inner @decorator @given(st.integers()) def test_can_replace_when_decorated(x): assert False, "This should be replaced" test_can_replace_when_decorated.hypothesis.inner_test = always_passes @pytest.mark.parametrize("x", [1, 2]) @given(y=st.integers()) def test_can_replace_when_parametrized(x, y): assert False, "This should be replaced" test_can_replace_when_parametrized.hypothesis.inner_test = always_passes hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_nesting.py000066400000000000000000000024241354103617500311430ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from pytest import raises import hypothesis.strategies as st from hypothesis import Verbosity, given, settings from tests.common.utils import no_shrink def test_nesting_1(): @given(st.integers(0, 100)) @settings(max_examples=5, database=None, deadline=None) def test_blah(x): @given(st.integers()) @settings( max_examples=100, phases=no_shrink, database=None, verbosity=Verbosity.quiet ) def test_nest(y): if y >= x: raise ValueError() with raises(ValueError): test_nest() test_blah() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_pretty_repr.py000066400000000000000000000061211354103617500320510ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
def builds_ignoring_invalid(target, *args, **kwargs):
    """Like st.builds, but rejects draws whose arguments ``target`` refuses
    (InvalidArgument) or deprecates, instead of erroring out."""

    def splat(value):
        positional, keywords = value
        try:
            built = target(*positional, **keywords)
            built.validate()
            return built
        except (HypothesisDeprecationWarning, InvalidArgument):
            reject()

    return st.tuples(st.tuples(*args), st.fixed_dictionaries(kwargs)).map(splat)
@checks_deprecated_behaviour
def test_seeds_off_random():
    """With no explicit seed, find() derives determinism from the global
    ``random`` state (deprecated behaviour, hence the decorator)."""
    opts = settings(phases=no_shrink, database=None)
    saved_state = random.getstate()
    first = find(st.integers(), lambda x: True, settings=opts)
    random.setstate(saved_state)
    second = find(st.integers(), lambda x: True, settings=opts)
    assert first == second
def test_can_generate_with_large_branching():
    """Recursive strategies with wide branching still shrink to all-zero leaves."""

    def flatten(x):
        # Collect leaf values from arbitrarily nested lists, left to right.
        if isinstance(x, list):
            leaves = []
            for child in x:
                leaves.extend(flatten(child))
            return leaves
        return [x]

    size = 20
    xs = minimal(
        st.recursive(
            st.integers(),
            lambda x: st.lists(x, min_size=size // 2),
            max_leaves=size * 2,
        ),
        lambda x: isinstance(x, list) and len(flatten(x)) >= size,
        timeout_after=None,
    )
    assert flatten(xs) == [0] * size
def test_can_use_recursive_data_in_sets():
    """Recursively-defined frozenset strategies can be drawn from and can
    satisfy predicates over their flattened contents."""
    nested_sets = st.recursive(st.booleans(), st.frozensets, max_leaves=3)
    find_any(nested_sets, settings=settings(deadline=None))

    def flatten(x):
        # Collapse arbitrarily nested frozensets into a flat frozenset of
        # bools, stopping early once both truth values have been seen.
        if isinstance(x, bool):
            return frozenset([x])
        seen = frozenset()
        for child in x:
            seen = seen | flatten(child)
            if len(seen) == 2:
                break
        return seen

    x = find_any(nested_sets, lambda x: len(flatten(x)) == 2, settings(deadline=None))
    assert x in (
        frozenset((False, True)),
        frozenset((False, frozenset((True,)))),
        frozenset((frozenset((False, True)),)),
    )
@st.composite
def charset(draw):
    """Generate a regex character class like ``[abc]`` or ``[^abc]``.

    Draw order (negation flag first, then members) is deliberately kept
    stable so the underlying byte stream is interpreted identically.
    """
    negate = draw(st.booleans())
    members = draw(st.text(string.ascii_letters + string.digits, min_size=1))
    return u"[%s%s]" % (u"^" if negate else u"", members)
def test_note_deprecation_blames_right_code_issue_652():
    """Regression test for issue 652: a deprecation warning raised while
    drawing from a strategy must blame the user's file, not hypothesis
    internals."""
    msg = "this is an arbitrary deprecation warning message"

    @st.composite
    def deprecated_strategy(draw):
        draw(st.none())
        note_deprecation(msg, since="RELEASEDAY")

    @given(deprecated_strategy())
    def f(x):
        pass

    with pytest.warns(HypothesisDeprecationWarning) as log:
        f()

    # Exactly one warning, despite the strategy being drawn many times.
    assert len(log) == 1
    record, = log
    # We got the warning we expected, from the right file
    assert isinstance(record.message, HypothesisDeprecationWarning)
    assert record.message.args == (msg,)
    assert record.filename == __file__
def rare_value_strategy(n, target):
    """``sampled_from(range(n))`` with every value except ``target``
    filtered away via a long chain of .filter calls."""

    def forbid(s, forbidden):
        """Helper function to avoid Python variable scoping issues."""
        # Binding ``forbidden`` as a parameter avoids late-binding closures.
        return s.filter(lambda x: x != forbidden)

    strategy = st.sampled_from(hrange(n))
    for value in hrange(n):
        if value != target:
            strategy = forbid(strategy, value)
    return strategy
def test_can_draw_sets_of_hard_to_find_elements():
    """Sets of a rarely-True boolean strategy can still reach min_size=2."""
    rare_true = floats(0, 1).map(lambda drawn: drawn <= 0.05)
    strategy = sets(rare_true, min_size=2)
    find_any(strategy, settings=settings(deadline=None))
def test_simplify_shared_linked_to_size():
    """Shrinking a list of one shared integer minimizes both the list length
    and the shared value jointly."""
    result = minimal(st.lists(st.shared(st.integers())), lambda t: sum(t) >= 1000)
    # Dropping any element must fall below the target sum...
    assert sum(result[:-1]) < 1000
    # ...and so must decrementing the single shared value.
    assert (result[0] - 1) * len(result) < 1000
@boundaries
def test_minimizes_ints_from_down_to_boundary(boundary):
    """minimal() lands exactly on ``boundary`` when started just below it."""

    def is_good(x):
        # The shrinker must never even try values below the strategy minimum.
        assert x >= boundary - 10
        return x >= boundary

    assert minimal(integers(min_value=boundary - 10), is_good) == boundary
    assert minimal(integers(min_value=boundary), lambda x: True) == boundary
<= 2.0 def test_minimals_boundary_floats(): def f(x): print(x) return True assert -1 <= minimal(floats(min_value=-1, max_value=1), f) <= 1 def test_minimal_non_boundary_float(): x = minimal(floats(min_value=1, max_value=9), lambda x: x > 2) assert 2 < x < 3 def test_minimal_float_is_zero(): assert minimal(floats(), lambda x: True) == 0.0 def test_negative_floats_simplify_to_zero(): assert minimal(floats(), lambda x: x <= -1.0) == -1.0 def test_minimal_infinite_float_is_positive(): assert minimal(floats(), math.isinf) == float(u"inf") def test_can_minimal_infinite_negative_float(): assert minimal(floats(), lambda x: x < -sys.float_info.max) def test_can_minimal_float_on_boundary_of_representable(): minimal(floats(), lambda x: x + 1 == x and not math.isinf(x)) def test_minimize_nan(): assert math.isnan(minimal(floats(), math.isnan)) def test_minimize_very_large_float(): t = sys.float_info.max / 2 assert t <= minimal(floats(), lambda x: x >= t) < float(u"inf") def is_integral(value): try: return int(value) == value except (OverflowError, ValueError): return False def test_can_minimal_float_far_from_integral(): minimal( floats(), lambda x: not (math.isnan(x) or math.isinf(x) or is_integral(x * (2 ** 32))), ) def test_list_of_fractional_float(): assert set( minimal( lists(floats(), min_size=5), lambda x: len([t for t in x if t >= 1.5]) >= 5, timeout_after=60, ) ).issubset([1.5, 2.0]) def test_minimal_fractional_float(): assert minimal(floats(), lambda x: x >= 1.5) in (1.5, 2.0) def test_minimizes_lists_of_negative_ints_up_to_boundary(): result = minimal( lists(integers(), min_size=10), lambda x: len([t for t in x if t <= -1]) >= 10, timeout_after=60, ) assert result == [-1] * 10 @pytest.mark.parametrize( (u"left", u"right"), [(0.0, 5e-324), (-5e-324, 0.0), (-5e-324, 5e-324), (5e-324, 1e-323)], ) def test_floats_in_constrained_range(left, right): @given(floats(left, right)) def test_in_range(r): assert left <= r <= right test_in_range() def test_bounds_are_respected(): 
assert minimal(floats(min_value=1.0), lambda x: True) == 1.0 assert minimal(floats(max_value=-1.0), lambda x: True) == -1.0 @pytest.mark.parametrize("k", range(10)) def test_floats_from_zero_have_reasonable_range(k): n = 10 ** k assert minimal(floats(min_value=0.0), lambda x: x >= n) == float(n) assert minimal(floats(max_value=0.0), lambda x: x <= -n) == float(-n) def test_explicit_allow_nan(): minimal(floats(allow_nan=True), math.isnan) def test_one_sided_contains_infinity(): minimal(floats(min_value=1.0), math.isinf) minimal(floats(max_value=1.0), math.isinf) @given(floats(min_value=0.0, allow_infinity=False)) def test_no_allow_infinity_upper(x): assert not math.isinf(x) @given(floats(max_value=0.0, allow_infinity=False)) def test_no_allow_infinity_lower(x): assert not math.isinf(x) class TestFloatsAreFloats(object): @given(floats()) def test_unbounded(self, arg): assert isinstance(arg, float) @checks_deprecated_behaviour @given(floats(min_value=0, max_value=2 ** 64 - 1)) def test_int_int(self, arg): assert isinstance(arg, float) @given(floats(min_value=0, max_value=float(2 ** 64 - 1))) def test_int_float(self, arg): assert isinstance(arg, float) @checks_deprecated_behaviour @given(floats(min_value=float(0), max_value=2 ** 64 - 1)) def test_float_int(self, arg): assert isinstance(arg, float) @given(floats(min_value=float(0), max_value=float(2 ** 64 - 1))) def test_float_float(self, arg): assert isinstance(arg, float) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_simple_strings.py000066400000000000000000000016721354103617500325420ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import unicodedata from hypothesis import given, settings from hypothesis.strategies import text @given(text(min_size=1, max_size=1)) @settings(max_examples=2000) def test_does_not_generate_surrogates(t): assert unicodedata.category(t) != u"Cs" hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_skipping.py000066400000000000000000000033511354103617500313200ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import unittest import pytest from hypothesis import given from hypothesis.core import skip_exceptions_to_reraise from hypothesis.strategies import integers from tests.common.utils import capture_out @pytest.mark.parametrize("skip_exception", skip_exceptions_to_reraise()) def test_no_falsifying_example_if_unittest_skip(skip_exception): """If a ``SkipTest`` exception is raised during a test, Hypothesis should not continue running the test and shrink process, nor should it print anything about falsifying examples.""" class DemoTest(unittest.TestCase): @given(xs=integers()) def test_to_be_skipped(self, xs): if xs == 0: raise skip_exception else: assert xs == 0 with capture_out() as o: suite = unittest.defaultTestLoader.loadTestsFromTestCase(DemoTest) unittest.TextTestRunner().run(suite) assert "Falsifying example" not in o.getvalue() def test_skipping_is_cached(): assert skip_exceptions_to_reraise() is skip_exceptions_to_reraise() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_strategy_state.py000066400000000000000000000141421354103617500325360ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import hashlib import math from random import Random from hypothesis import Verbosity, assume, settings from hypothesis.database import ExampleDatabase from hypothesis.internal.compat import PYPY from hypothesis.internal.floats import float_to_int, int_to_float, is_negative from hypothesis.stateful import Bundle, RuleBasedStateMachine, rule from hypothesis.strategies import ( binary, booleans, complex_numbers, data, decimals, floats, fractions, integers, just, lists, none, sampled_from, text, tuples, ) AVERAGE_LIST_LENGTH = 2 def clamp(lower, value, upper): """Given a value and optional lower/upper bounds, 'clamp' the value so that it satisfies lower <= value <= upper.""" if (lower is not None) and (upper is not None) and (lower > upper): raise ValueError("Cannot clamp with lower > upper: %r > %r" % (lower, upper)) if lower is not None: value = max(lower, value) if upper is not None: value = min(value, upper) return value class HypothesisSpec(RuleBasedStateMachine): def __init__(self): super(HypothesisSpec, self).__init__() self.database = None strategies = Bundle(u"strategy") strategy_tuples = Bundle(u"tuples") objects = Bundle(u"objects") basic_data = Bundle(u"basic") varied_floats = Bundle(u"varied_floats") def teardown(self): self.clear_database() @rule() def clear_database(self): if self.database is not None: self.database.close() self.database = None @rule() def set_database(self): self.teardown() self.database = ExampleDatabase() @rule( target=strategies, spec=sampled_from( ( integers(), booleans(), floats(), complex_numbers(), fractions(), decimals(), text(), binary(), none(), tuples(), ) ), ) def strategy(self, spec): return spec @rule(target=strategies, values=lists(integers() | text(), min_size=1)) def sampled_from_strategy(self, values): return sampled_from(values) @rule(target=strategies, spec=strategy_tuples) def strategy_for_tupes(self, spec): return tuples(*spec) 
@rule(target=strategies, source=strategies, level=integers(1, 10), mixer=text()) def filtered_strategy(s, source, level, mixer): def is_good(x): return bool( Random( hashlib.md5((mixer + repr(x)).encode(u"utf-8")).digest() ).randint(0, level) ) return source.filter(is_good) @rule(target=strategies, elements=strategies) def list_strategy(self, elements): return lists(elements) @rule(target=strategies, left=strategies, right=strategies) def or_strategy(self, left, right): return left | right @rule(target=varied_floats, source=floats()) def float(self, source): return source @rule(target=varied_floats, source=varied_floats, offset=integers(-100, 100)) def adjust_float(self, source, offset): return int_to_float(clamp(0, float_to_int(source) + offset, 2 ** 64 - 1)) @rule(target=strategies, left=varied_floats, right=varied_floats) def float_range(self, left, right): for f in (math.isnan, math.isinf): for x in (left, right): assume(not f(x)) left, right = sorted((left, right)) assert left <= right # exclude deprecated case where left = 0.0 and right = -0.0 assume(left or right or not (is_negative(right) and not is_negative(left))) return floats(left, right) @rule( target=strategies, source=strategies, result1=strategies, result2=strategies, mixer=text(), p=floats(0, 1), ) def flatmapped_strategy(self, source, result1, result2, mixer, p): assume(result1 is not result2) def do_map(value): rep = repr(value) random = Random(hashlib.md5((mixer + rep).encode(u"utf-8")).digest()) if random.random() <= p: return result1 else: return result2 return source.flatmap(do_map) @rule(target=strategies, value=objects) def just_strategy(self, value): return just(value) @rule(target=strategy_tuples, source=strategies) def single_tuple(self, source): return (source,) @rule(target=strategy_tuples, left=strategy_tuples, right=strategy_tuples) def cat_tuples(self, left, right): return left + right @rule(target=objects, strat=strategies, data=data()) def get_example(self, strat, data): 
data.draw(strat) @rule(target=strategies, left=integers(), right=integers()) def integer_range(self, left, right): left, right = sorted((left, right)) return integers(left, right) @rule(strat=strategies) def repr_is_good(self, strat): assert u" at 0x" not in repr(strat) MAIN = __name__ == u"__main__" TestHypothesis = HypothesisSpec.TestCase TestHypothesis.settings = settings( TestHypothesis.settings, stateful_step_count=10 if PYPY else 50, verbosity=max(TestHypothesis.settings.verbosity, Verbosity.verbose), max_examples=10000 if MAIN else 200, ) if MAIN: TestHypothesis().runTest() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_testdecorators.py000066400000000000000000000037701354103617500325460ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import HealthCheck, given, reject, settings from hypothesis.errors import InvalidArgument, Unsatisfiable from hypothesis.strategies import integers from tests.common.utils import raises def test_contains_the_test_function_name_in_the_exception_string(): look_for_one = settings(max_examples=1, suppress_health_check=HealthCheck.all()) @given(integers()) @look_for_one def this_has_a_totally_unique_name(x): reject() with raises(Unsatisfiable) as e: this_has_a_totally_unique_name() assert this_has_a_totally_unique_name.__name__ in e.value.args[0] class Foo(object): @given(integers()) @look_for_one def this_has_a_unique_name_and_lives_on_a_class(self, x): reject() with raises(Unsatisfiable) as e: Foo().this_has_a_unique_name_and_lives_on_a_class() assert (Foo.this_has_a_unique_name_and_lives_on_a_class.__name__) in e.value.args[0] def test_signature_mismatch_error_message(): # Regression test for issue #1978 @settings(max_examples=2) @given(x=integers()) def bad_test(): pass try: bad_test() except InvalidArgument as e: assert ( str(e) == "bad_test() got an unexpected keyword argument 'x', " "from `x=integers()` in @given" ) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_threading.py000066400000000000000000000020211354103617500314320ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import threading import hypothesis.strategies as st from hypothesis import given def test_can_run_given_in_thread(): has_run_successfully = [False] @given(st.integers()) def test(n): has_run_successfully[0] = True t = threading.Thread(target=test) t.start() t.join() assert has_run_successfully[0] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_unusual_settings_configs.py000066400000000000000000000023301354103617500346140ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.strategies as st from hypothesis import HealthCheck, Verbosity, assume, given, settings @settings(max_examples=1, database=None) @given(st.integers()) def test_single_example(n): pass @settings( max_examples=1, database=None, suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.too_slow], verbosity=Verbosity.debug, ) @given(st.integers()) def test_hard_to_find_single_example(n): # Numbers are arbitrary, just deliberately unlikely to hit this too soon. 
assume(n % 50 == 11) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/nocover/test_uuids.py000066400000000000000000000023721354103617500306270ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import given from tests.common.debug import minimal @given(st.lists(st.uuids())) def test_are_unique(ls): assert len(set(ls)) == len(ls) def test_retains_uniqueness_in_simplify(): ts = minimal(st.lists(st.uuids()), lambda x: len(x) >= 5) assert len(ts) == len(set(ts)) == 5 @pytest.mark.parametrize("version", (1, 2, 3, 4, 5)) def test_can_generate_specified_version(version): @given(st.uuids(version=version)) def inner(uuid): assert version == uuid.version inner() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/numpy/000077500000000000000000000000001354103617500255565ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/numpy/__init__.py000066400000000000000000000012751354103617500276740ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/numpy/test_argument_validation.py000066400000000000000000000135121354103617500332250ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import numpy import pytest import hypothesis.extra.numpy as nps import hypothesis.strategies as st from hypothesis import given from hypothesis.errors import InvalidArgument from tests.common.utils import checks_deprecated_behaviour def e(a, **kwargs): return (a, kwargs) @pytest.mark.parametrize( ("function", "kwargs"), [ e(nps.array_dtypes, min_size=2, max_size=1), e(nps.array_dtypes, min_size=-1), e(nps.array_shapes, min_side=2, max_side=1), e(nps.array_shapes, min_dims=3, max_dims=2), e(nps.array_shapes, min_dims=-1), e(nps.array_shapes, min_side=-1), e(nps.array_shapes, min_side="not an int"), e(nps.array_shapes, max_side="not an int"), e(nps.array_shapes, min_dims="not an int"), e(nps.array_shapes, max_dims="not an int"), e(nps.array_shapes, min_dims=33), e(nps.array_shapes, max_dims=33), e(nps.arrays, dtype=float, shape=(0.5,)), e(nps.arrays, dtype=object, shape=1), e(nps.arrays, dtype=float, shape=1, fill=3), e(nps.byte_string_dtypes, min_len=-1), e(nps.byte_string_dtypes, min_len=2, max_len=1), e(nps.datetime64_dtypes, max_period=11), e(nps.datetime64_dtypes, min_period=11), e(nps.datetime64_dtypes, min_period="Y", max_period="M"), e(nps.timedelta64_dtypes, max_period=11), e(nps.timedelta64_dtypes, min_period=11), e(nps.timedelta64_dtypes, min_period="Y", max_period="M"), e(nps.unicode_string_dtypes, min_len=-1), e(nps.unicode_string_dtypes, min_len=2, max_len=1), e(nps.unsigned_integer_dtypes, endianness=3), e(nps.unsigned_integer_dtypes, sizes=()), e(nps.unsigned_integer_dtypes, sizes=(3,)), e(nps.from_dtype, dtype="float64"), e(nps.from_dtype, dtype=float), e(nps.from_dtype, dtype=numpy.int8), e(nps.from_dtype, dtype=1), e(nps.valid_tuple_axes, ndim=-1), e(nps.valid_tuple_axes, ndim=2, min_size=-1), e(nps.valid_tuple_axes, ndim=2, min_size=3, max_size=10), e(nps.valid_tuple_axes, ndim=2, min_size=2, max_size=1), e(nps.valid_tuple_axes, ndim=2.0, min_size=2, max_size=1), 
e(nps.valid_tuple_axes, ndim=2, min_size=1.0, max_size=2), e(nps.valid_tuple_axes, ndim=2, min_size=1, max_size=2.0), e(nps.valid_tuple_axes, ndim=2, min_size=1, max_size=3), e(nps.broadcastable_shapes, shape="a"), e(nps.broadcastable_shapes, shape=(2, 2), min_side="a"), e(nps.broadcastable_shapes, shape=(2, 2), min_dims="a"), e(nps.broadcastable_shapes, shape=(2, 2), max_side="a"), e(nps.broadcastable_shapes, shape=(2, 2), max_dims="a"), e(nps.broadcastable_shapes, shape=(2, 2), min_side=-1), e(nps.broadcastable_shapes, shape=(2, 2), min_dims=-1), e(nps.broadcastable_shapes, shape=(2, 2), min_dims=33, max_dims=None), e(nps.broadcastable_shapes, shape=(2, 2), min_dims=1, max_dims=33), e(nps.broadcastable_shapes, shape=(2, 2), min_side=1, max_side=0), e(nps.broadcastable_shapes, shape=(2, 2), min_dims=1, max_dims=0), e( nps.broadcastable_shapes, # max_side too small shape=(5, 1), min_dims=2, max_dims=4, min_side=2, max_side=3, ), e( nps.broadcastable_shapes, # min_side too large shape=(0, 1), min_dims=2, max_dims=4, min_side=2, max_side=3, ), e( nps.broadcastable_shapes, # default max_dims unsatisfiable shape=(5, 3, 2, 1), min_dims=3, max_dims=None, min_side=2, max_side=3, ), e( nps.broadcastable_shapes, # default max_dims unsatisfiable shape=(0, 3, 2, 1), min_dims=3, max_dims=None, min_side=2, max_side=3, ), e(nps.integer_array_indices, shape=()), e(nps.integer_array_indices, shape=(2, 0)), e(nps.integer_array_indices, shape="a"), e(nps.integer_array_indices, shape=(2,), result_shape=(2, 2)), e(nps.integer_array_indices, shape=(2,), dtype=float), ], ) def test_raise_invalid_argument(function, kwargs): with pytest.raises(InvalidArgument): function(**kwargs).example() @nps.defines_dtype_strategy def bad_dtype_strategy(): return st.just([("f1", "int32"), ("f1", "int32")]) @given(st.data()) def test_bad_dtype_strategy(capsys, data): s = bad_dtype_strategy() with pytest.raises(ValueError): data.draw(s) val = s.wrapped_strategy.mapped_strategy.value assert 
capsys.readouterr().out.startswith( "Got invalid dtype value=%r from strategy=just(%r), function=" % (val, val) ) @checks_deprecated_behaviour @given(st.data()) def test_byte_string_dtype_len_0(data): s = nps.byte_string_dtypes(min_len=0, max_len=0) assert data.draw(s).itemsize == 1 @checks_deprecated_behaviour @given(st.data()) def test_unicode_string_dtype_len_0(data): s = nps.unicode_string_dtypes(min_len=0, max_len=0) assert data.draw(s).itemsize == 4 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/numpy/test_fill_values.py000066400000000000000000000035001354103617500314720ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.strategies as st from hypothesis import given from hypothesis.extra.numpy import arrays from tests.common.debug import find_any, minimal @given(arrays(object, 100, st.builds(list))) def test_generated_lists_are_distinct(ls): assert len(set(map(id, ls))) == len(ls) @st.composite def distinct_integers(draw): used = draw(st.shared(st.builds(set), key="distinct_integers.used")) i = draw(st.integers(0, 2 ** 64 - 1).filter(lambda x: x not in used)) used.add(i) return i @given(arrays("uint64", 10, distinct_integers())) def test_does_not_reuse_distinct_integers(arr): assert len(set(arr)) == len(arr) def test_may_reuse_distinct_integers_if_asked(): find_any( arrays("uint64", 10, distinct_integers(), fill=distinct_integers()), lambda x: len(set(x)) < len(x), ) def test_minimizes_to_fill(): result = minimal(arrays(float, 10, fill=st.just(3.0))) assert (result == 3.0).all() @given( arrays( dtype=float, elements=st.floats().filter(bool), shape=(3, 3, 3), fill=st.just(1.0), ) ) def test_fills_everything(x): assert x.all() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/numpy/test_gen_data.py000066400000000000000000000635261354103617500307450ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import sys import numpy as np import pytest import six import hypothesis.extra.numpy as nps import hypothesis.strategies as st from hypothesis import HealthCheck, assume, given, note, settings from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import binary_type, text_type from hypothesis.searchstrategy import SearchStrategy from tests.common.debug import find_any, minimal from tests.common.utils import checks_deprecated_behaviour, fails_with, flaky STANDARD_TYPES = list( map( np.dtype, [ u"int8", u"int16", u"int32", u"int64", u"uint8", u"uint16", u"uint32", u"uint64", u"float", u"float16", u"float32", u"float64", u"complex64", u"complex128", u"datetime64", u"timedelta64", bool, text_type, binary_type, ], ) ) @given(nps.nested_dtypes()) def test_strategies_for_standard_dtypes_have_reusable_values(dtype): assert nps.from_dtype(dtype).has_reusable_values @pytest.mark.parametrize(u"t", STANDARD_TYPES) def test_produces_instances(t): @given(nps.from_dtype(t)) def test_is_t(x): assert isinstance(x, t.type) assert x.dtype.kind == t.kind test_is_t() @given(nps.arrays(float, ())) def test_empty_dimensions_are_arrays(x): assert isinstance(x, np.ndarray) assert x.dtype.kind == u"f" @given(nps.arrays(float, (1, 0, 1))) def test_can_handle_zero_dimensions(x): assert x.shape == (1, 0, 1) @given(nps.arrays(u"uint32", (5, 5))) def test_generates_unsigned_ints(x): assert (x >= 0).all() @given(st.data()) def test_can_handle_long_shapes(data): """We can eliminate this test once we drop Py2 support.""" for tt in six.integer_types: X = data.draw(nps.arrays(float, (tt(5),))) assert X.shape == (5,) X = data.draw(nps.arrays(float, (tt(5), tt(5)))) assert X.shape == (5, 5) @given(nps.arrays(int, (1,))) def test_assert_fits_in_machine_size(x): pass def test_generates_and_minimizes(): assert (minimal(nps.arrays(float, (2, 2))) == np.zeros(shape=(2, 2))).all() def 
test_can_minimize_large_arrays(): x = minimal( nps.arrays(u"uint32", 100), lambda x: np.any(x) and not np.all(x), timeout_after=60, ) assert np.logical_or(x == 0, x == 1).all() assert np.count_nonzero(x) in (1, len(x) - 1) @flaky(max_runs=50, min_passes=1) def test_can_minimize_float_arrays(): x = minimal(nps.arrays(float, 50), lambda t: t.sum() >= 1.0) assert x.sum() in (1, 50) class Foo(object): pass foos = st.tuples().map(lambda _: Foo()) def test_can_create_arrays_of_composite_types(): arr = minimal(nps.arrays(object, 100, foos)) for x in arr: assert isinstance(x, Foo) @given(st.lists(st.integers()), st.data()) def test_can_create_zero_dim_arrays_of_lists(x, data): arr = data.draw(nps.arrays(object, (), elements=st.just(x))) assert arr.shape == () assert arr.dtype == np.dtype(object) assert arr.item() == x def test_can_create_arrays_of_tuples(): arr = minimal( nps.arrays(object, 10, st.tuples(st.integers(), st.integers())), lambda x: all(t0 != t1 for t0, t1 in x), ) assert all(a in ((1, 0), (0, 1)) for a in arr) @given(nps.arrays(object, (2, 2), st.tuples(st.integers()))) def test_does_not_flatten_arrays_of_tuples(arr): assert isinstance(arr[0][0], tuple) @given(nps.arrays(object, (2, 2), st.lists(st.integers(), min_size=1, max_size=1))) def test_does_not_flatten_arrays_of_lists(arr): assert isinstance(arr[0][0], list) @given(nps.array_shapes()) def test_can_generate_array_shapes(shape): assert isinstance(shape, tuple) assert all(isinstance(i, int) for i in shape) @settings(deadline=None, max_examples=10) @given(st.integers(0, 10), st.integers(0, 9), st.integers(0), st.integers(0)) def test_minimise_array_shapes(min_dims, dim_range, min_side, side_range): smallest = minimal( nps.array_shapes( min_dims, min_dims + dim_range, min_side, min_side + side_range ) ) assert len(smallest) == min_dims and all(k == min_side for k in smallest) @pytest.mark.parametrize( "kwargs", [dict(min_side=100), dict(min_dims=15), dict(min_dims=32)] ) def 
test_interesting_array_shapes_argument(kwargs): nps.array_shapes(**kwargs).example() @given(nps.scalar_dtypes()) def test_can_generate_scalar_dtypes(dtype): assert isinstance(dtype, np.dtype) @given( nps.nested_dtypes( subtype_strategy=st.one_of( nps.scalar_dtypes(), nps.byte_string_dtypes(), nps.unicode_string_dtypes() ) ) ) def test_can_generate_compound_dtypes(dtype): assert isinstance(dtype, np.dtype) @given( nps.nested_dtypes( subtype_strategy=st.one_of( nps.scalar_dtypes(), nps.byte_string_dtypes(), nps.unicode_string_dtypes() ) ).flatmap(lambda dt: nps.arrays(dtype=dt, shape=1)) ) def test_can_generate_data_compound_dtypes(arr): # This is meant to catch the class of errors which prompted PR #2085 assert isinstance(arr, np.ndarray) @given(nps.nested_dtypes(max_itemsize=400), st.data()) def test_infer_strategy_from_dtype(dtype, data): # Given a dtype assert isinstance(dtype, np.dtype) # We can infer a strategy strat = nps.from_dtype(dtype) assert isinstance(strat, SearchStrategy) # And use it to fill an array of that dtype data.draw(nps.arrays(dtype, 10, strat)) @given(nps.nested_dtypes()) def test_np_dtype_is_idempotent(dtype): assert dtype == np.dtype(dtype) def test_minimise_scalar_dtypes(): assert minimal(nps.scalar_dtypes()) == np.dtype(u"bool") def test_minimise_nested_types(): assert minimal(nps.nested_dtypes()) == np.dtype(u"bool") def test_minimise_array_strategy(): smallest = minimal( nps.arrays( nps.nested_dtypes(max_itemsize=200), nps.array_shapes(max_dims=3, max_side=3), ) ) assert smallest.dtype == np.dtype(u"bool") and not smallest.any() @given(nps.array_dtypes(allow_subarrays=False)) def test_can_turn_off_subarrays(dt): for field, _ in dt.fields.values(): assert field.shape == () @pytest.mark.parametrize("byteorder", ["<", ">"]) @given(data=st.data()) def test_can_restrict_endianness(data, byteorder): dtype = data.draw(nps.integer_dtypes(byteorder, sizes=(16, 32, 64))) if byteorder == ("<" if sys.byteorder == "little" else ">"): assert 
dtype.byteorder == "=" else: assert dtype.byteorder == byteorder @given(nps.integer_dtypes(sizes=8)) def test_can_specify_size_as_an_int(dt): assert dt.itemsize == 1 @given(st.data()) def test_can_draw_arrays_from_scalars(data): dt = data.draw(nps.scalar_dtypes()) result = data.draw(nps.arrays(dtype=dt, shape=())) assert isinstance(result, np.ndarray) assert result.dtype == dt @given(st.data()) def test_can_cast_for_scalars(data): # Note: this only passes with castable datatypes, certain dtype # combinations will result in an error if numpy is not able to cast them. dt_elements = np.dtype(data.draw(st.sampled_from(["bool", "i2"]))) dt_desired = np.dtype( data.draw(st.sampled_from(["i2", "float32", "float64"])) ) result = data.draw( nps.arrays(dtype=dt_desired, elements=nps.from_dtype(dt_elements), shape=()) ) assert isinstance(result, np.ndarray) assert result.dtype == dt_desired @given(st.data()) def test_can_cast_for_arrays(data): # Note: this only passes with castable datatypes, certain dtype # combinations will result in an error if numpy is not able to cast them. 
dt_elements = np.dtype(data.draw(st.sampled_from(["bool", "i2"]))) dt_desired = np.dtype( data.draw(st.sampled_from(["i2", "float32", "float64"])) ) result = data.draw( nps.arrays( dtype=dt_desired, elements=nps.from_dtype(dt_elements), shape=(1, 2, 3) ) ) assert isinstance(result, np.ndarray) assert result.dtype == dt_desired @given(st.data()) def test_unicode_string_dtypes_generate_unicode_strings(data): dt = data.draw(nps.unicode_string_dtypes()) result = data.draw(nps.from_dtype(dt)) assert isinstance(result, text_type) @given(st.data()) def test_byte_string_dtypes_generate_unicode_strings(data): dt = data.draw(nps.byte_string_dtypes()) result = data.draw(nps.from_dtype(dt)) assert isinstance(result, binary_type) @given(nps.arrays(dtype="int8", shape=st.integers(0, 20), unique=True)) def test_array_values_are_unique(arr): assert len(set(arr)) == len(arr) @given( nps.arrays( elements=st.just(0.0), dtype=float, fill=st.just(float("nan")), shape=st.integers(0, 20), unique=True, ) ) def test_array_values_are_unique_high_collision(arr): assert (arr == 0.0).sum() <= 1 def test_may_fill_with_nan_when_unique_is_set(): find_any( nps.arrays( dtype=float, elements=st.floats(allow_nan=False), shape=10, unique=True, fill=st.just(float("nan")), ), lambda x: np.isnan(x).any(), ) @given( nps.arrays( dtype=float, elements=st.floats(allow_nan=False), shape=10, unique=True, fill=st.just(float("nan")), ) ) def test_is_still_unique_with_nan_fill(xs): assert len(set(xs)) == len(xs) @fails_with(InvalidArgument) @given( nps.arrays( dtype=float, elements=st.floats(allow_nan=False), shape=10, unique=True, fill=st.just(0.0), ) ) def test_may_not_fill_with_non_nan_when_unique_is_set(arr): pass @fails_with(InvalidArgument) @given(nps.arrays(dtype="U", shape=10, unique=True, fill=st.just(u""))) def test_may_not_fill_with_non_nan_when_unique_is_set_and_type_is_not_number(arr): pass @given( st.data(), st.builds( "{}[{}]".format, st.sampled_from(("datetime64", "timedelta64")), 
st.sampled_from(nps.TIME_RESOLUTIONS), ).map(np.dtype), ) def test_inferring_from_time_dtypes_gives_same_dtype(data, dtype): ex = data.draw(nps.from_dtype(dtype)) assert dtype == ex.dtype @given(st.data(), nps.byte_string_dtypes() | nps.unicode_string_dtypes()) def test_inferred_string_strategies_roundtrip(data, dtype): # Check that we never generate too-long or nul-terminated strings, which # cannot be read back out of an array. arr = np.zeros(shape=1, dtype=dtype) ex = data.draw(nps.from_dtype(arr.dtype)) arr[0] = ex assert arr[0] == ex @given(st.data(), nps.scalar_dtypes()) def test_all_inferred_scalar_strategies_roundtrip(data, dtype): # We only check scalars here, because record/compound/nested dtypes always # give an array of np.void objects. We're interested in whether scalar # values are safe, not known type coercion. arr = np.zeros(shape=1, dtype=dtype) ex = data.draw(nps.from_dtype(arr.dtype)) assume(ex == ex) # If not, the roundtrip test *should* fail! (eg NaN) arr[0] = ex assert arr[0] == ex @pytest.mark.parametrize("fill", [False, True]) @checks_deprecated_behaviour @given(st.data()) def test_overflowing_integers_are_deprecated(fill, data): kw = dict(elements=st.just(300)) if fill: kw = dict(elements=st.nothing(), fill=kw["elements"]) arr = data.draw(nps.arrays(dtype="int8", shape=(1,), **kw)) assert arr[0] == (300 % 256) @pytest.mark.parametrize("fill", [False, True]) @pytest.mark.parametrize( "dtype,strat", [ ("float16", st.floats(min_value=65520, allow_infinity=False)), ("float32", st.floats(min_value=10 ** 40, allow_infinity=False)), ("complex64", st.complex_numbers(10 ** 300, allow_infinity=False)), ("U1", st.text(min_size=2, max_size=2)), ("S1", st.binary(min_size=2, max_size=2)), ], ) @checks_deprecated_behaviour @given(data=st.data()) def test_unrepresentable_elements_are_deprecated(fill, dtype, strat, data): if fill: kw = dict(elements=st.nothing(), fill=strat) else: kw = dict(elements=strat) arr = data.draw(nps.arrays(dtype=dtype, shape=(1,), 
**kw)) try: # This is a float or complex number, and has overflowed to infinity, # triggering our deprecation for overflow. assert np.isinf(arr[0]) except TypeError: # We tried to call isinf on a string. The string was generated at # length two, then truncated by the dtype of size 1 - deprecation # again. If the first character was \0 it is now the empty string. assert len(arr[0]) <= 1 @given(nps.arrays(dtype="float16", shape=(1,))) def test_inferred_floats_do_not_overflow(arr): pass @given( nps.arrays( dtype="float16", shape=10, unique=True, elements=st.integers(1, 9), fill=st.just(np.nan), ) ) def test_unique_array_with_fill_can_use_all_elements(arr): assume(len(set(arr)) == arr.size) @given(nps.arrays(dtype="uint8", shape=25, unique=True, fill=st.nothing())) def test_unique_array_without_fill(arr): # This test covers the collision-related branchs for fully dense unique arrays. # Choosing 25 of 256 possible elements means we're almost certain to see colisions # thanks to the 'birthday paradox', but finding unique elemennts is still easy. 
assume(len(set(arr)) == arr.size) @given(ndim=st.integers(0, 5), data=st.data()) def test_mapped_positive_axes_are_unique(ndim, data): min_size = data.draw(st.integers(0, ndim), label="min_size") max_size = data.draw(st.integers(min_size, ndim), label="max_size") axes = data.draw(nps.valid_tuple_axes(ndim, min_size, max_size), label="axes") assert len(set(axes)) == len({i if 0 < i else ndim + i for i in axes}) @given(ndim=st.integers(0, 5), data=st.data()) def test_length_bounds_are_satisfied(ndim, data): min_size = data.draw(st.integers(0, ndim), label="min_size") max_size = data.draw(st.integers(min_size, ndim), label="max_size") axes = data.draw(nps.valid_tuple_axes(ndim, min_size, max_size), label="axes") assert min_size <= len(axes) <= max_size @given(shape=nps.array_shapes(), data=st.data()) def test_axes_are_valid_inputs_to_sum(shape, data): x = np.zeros(shape, dtype="uint8") axes = data.draw(nps.valid_tuple_axes(ndim=len(shape)), label="axes") np.sum(x, axes) @settings(deadline=None, max_examples=10) @given(ndim=st.integers(0, 3), data=st.data()) def test_minimize_tuple_axes(ndim, data): min_size = data.draw(st.integers(0, ndim), label="min_size") max_size = data.draw(st.integers(min_size, ndim), label="max_size") smallest = minimal(nps.valid_tuple_axes(ndim, min_size, max_size)) assert len(smallest) == min_size and all(k > -1 for k in smallest) @settings(deadline=None, max_examples=10) @given(ndim=st.integers(0, 3), data=st.data()) def test_minimize_negative_tuple_axes(ndim, data): min_size = data.draw(st.integers(0, ndim), label="min_size") max_size = data.draw(st.integers(min_size, ndim), label="max_size") smallest = minimal( nps.valid_tuple_axes(ndim, min_size, max_size), lambda x: all(i < 0 for i in x) ) assert len(smallest) == min_size @settings(deadline=None, suppress_health_check=[HealthCheck.too_slow]) @given( shape=nps.array_shapes(min_side=0, max_side=4, min_dims=0, max_dims=3), data=st.data(), ) def 
test_broadcastable_shape_bounds_are_satisfied(shape, data): min_dim = data.draw(st.integers(0, 4), label="min_dim") max_dim = data.draw(st.one_of(st.none(), st.integers(min_dim, 4)), label="max_dim") min_side = data.draw(st.integers(0, 3), label="min_side") max_side = data.draw( st.one_of(st.none(), st.integers(min_side, 6)), label="max_side" ) try: bshape = data.draw( nps.broadcastable_shapes( shape, min_side=min_side, max_side=max_side, min_dims=min_dim, max_dims=max_dim, ), label="bshape", ) except InvalidArgument: assume(False) if max_dim is None: max_dim = max(len(shape), min_dim) + 2 if max_side is None: max_side = max(tuple(shape[::-1][:max_dim]) + (min_side,)) + 2 assert isinstance(bshape, tuple) and all(isinstance(s, int) for s in bshape) assert min_dim <= len(bshape) <= max_dim assert all(min_side <= s <= max_side for s in bshape) def _draw_valid_bounds(data, shape, max_dim, permit_none=True): if max_dim == 0 or not shape: return 0, None smallest_side = min(shape[::-1][:max_dim]) min_strat = ( st.sampled_from([1, smallest_side]) if smallest_side > 1 else st.just(smallest_side) ) min_side = data.draw(min_strat, label="min_side") largest_side = max(max(shape[::-1][:max_dim]), min_side) if permit_none: max_strat = st.one_of(st.none(), st.integers(largest_side, largest_side + 2)) else: max_strat = st.integers(largest_side, largest_side + 2) max_side = data.draw(max_strat, label="max_side") return min_side, max_side @settings(deadline=None, max_examples=1000) @given( shape=nps.array_shapes(min_dims=0, max_dims=6, min_side=1, max_side=5), data=st.data(), ) def test_broadcastable_shape_has_good_default_values(shape, data): # This test ensures that default parameters can always produce broadcast-compatible shapes broadcastable_shape = data.draw( nps.broadcastable_shapes(shape), label="broadcastable_shapes" ) a = np.zeros(shape, dtype="uint8") b = np.zeros(broadcastable_shape, dtype="uint8") np.broadcast(a, b) # error if drawn shape for b is not 
broadcast-compatible @settings(deadline=None) @given( min_dim=st.integers(0, 5), shape=nps.array_shapes(min_dims=0, max_dims=3, min_side=0, max_side=10), data=st.data(), ) def test_broadcastable_shape_can_broadcast(min_dim, shape, data): max_dim = data.draw(st.one_of(st.none(), st.integers(min_dim, 5)), label="max_dim") min_side, max_side = _draw_valid_bounds(data, shape, max_dim) broadcastable_shape = data.draw( nps.broadcastable_shapes( shape, min_side=min_side, max_side=max_side, min_dims=min_dim, max_dims=max_dim, ), label="broadcastable_shapes", ) a = np.zeros(shape, dtype="uint8") b = np.zeros(broadcastable_shape, dtype="uint8") np.broadcast(a, b) # error if drawn shape for b is not broadcast-compatible @settings(deadline=None, max_examples=10) @given( min_dim=st.integers(0, 5), shape=nps.array_shapes(min_dims=0, max_dims=3, min_side=0, max_side=5), data=st.data(), ) def test_minimize_broadcastable_shape(min_dim, shape, data): # Ensure aligned dimensions of broadcastable shape minimizes to `(1,) * min_dim` max_dim = data.draw(st.one_of(st.none(), st.integers(min_dim, 5)), label="max_dim") min_side, max_side = _draw_valid_bounds(data, shape, max_dim, permit_none=False) smallest = minimal( nps.broadcastable_shapes( shape, min_side=min_side, max_side=max_side, min_dims=min_dim, max_dims=max_dim, ) ) note("(smallest): {}".format(smallest)) n_leading = max(len(smallest) - len(shape), 0) n_aligned = max(len(smallest) - n_leading, 0) expected = [min_side] * n_leading + [ 1 if min_side <= 1 <= max_side else i for i in shape[len(shape) - n_aligned :] ] assert tuple(expected) == smallest @settings(deadline=None) @given(max_dim=st.integers(4, 6), data=st.data()) def test_broadcastable_shape_adjusts_max_dim_with_explicit_bounds(max_dim, data): # Ensures that `broadcastable_shapes` limits itself to satisfiable dimensions # Broadcastable values can only be drawn for dims 0-3 for these shapes shape = data.draw(st.sampled_from([(5, 3, 2, 1), (0, 3, 2, 1)]), label="shape") 
broadcastable_shape = data.draw( nps.broadcastable_shapes( shape, min_side=2, max_side=3, min_dims=3, max_dims=max_dim ), label="broadcastable_shapes", ) assert len(broadcastable_shape) == 3 a = np.zeros(shape, dtype="uint8") b = np.zeros(broadcastable_shape, dtype="uint8") np.broadcast(a, b) # error if drawn shape for b is not broadcast-compatible @settings(deadline=None, max_examples=10) @given(min_dim=st.integers(0, 4), min_side=st.integers(2, 3), data=st.data()) def test_broadcastable_shape_shrinking_with_singleton_out_of_bounds( min_dim, min_side, data ): max_dim = data.draw(st.one_of(st.none(), st.integers(min_dim, 4)), label="max_dim") max_side = data.draw( st.one_of(st.none(), st.integers(min_side, 6)), label="max_side" ) ndims = data.draw(st.integers(1, 4), label="ndim") shape = (1,) * ndims smallest = minimal( nps.broadcastable_shapes( shape, min_side=min_side, max_side=max_side, min_dims=min_dim, max_dims=max_dim, ) ) assert smallest == (min_side,) * min_dim @settings(deadline=None) @given( shape=nps.array_shapes(min_dims=0, max_dims=3, min_side=0, max_side=5), max_dims=st.integers(0, 6), data=st.data(), ) def test_broadcastable_shape_can_generate_arbitrary_ndims(shape, max_dims, data): # ensures that generates shapes can possess any length in [min_dims, max_dims] desired_ndim = data.draw(st.integers(0, max_dims), label="desired_ndim") min_dims = data.draw( st.one_of(st.none(), st.integers(0, desired_ndim)), label="min_dims" ) args = ( dict(min_dims=min_dims) if min_dims is not None else {} ) # check default arg behavior too find_any( nps.broadcastable_shapes(shape, min_side=0, max_dims=max_dims, **args), lambda x: len(x) == desired_ndim, settings(max_examples=10 ** 6), ) @settings(deadline=None) @given( shape=nps.array_shapes(min_dims=1, min_side=1), dtype=st.one_of(nps.unsigned_integer_dtypes(), nps.integer_dtypes()), data=st.data(), ) def test_advanced_integer_index_is_valid_with_default_result_shape(shape, dtype, data): index = 
data.draw(nps.integer_array_indices(shape, dtype=dtype)) x = np.zeros(shape) out = x[index] # raises if the index is invalid assert not np.shares_memory(x, out) # advanced indexing should not return a view assert all(dtype == x.dtype for x in index) @settings(deadline=None) @given( shape=nps.array_shapes(min_dims=1, min_side=1), min_dims=st.integers(0, 3), min_side=st.integers(0, 3), dtype=st.one_of(nps.unsigned_integer_dtypes(), nps.integer_dtypes()), data=st.data(), ) def test_advanced_integer_index_is_valid_and_satisfies_bounds( shape, min_dims, min_side, dtype, data ): max_side = data.draw(st.integers(min_side, min_side + 2), label="max_side") max_dims = data.draw(st.integers(min_dims, min_dims + 2), label="max_dims") index = data.draw( nps.integer_array_indices( shape, result_shape=nps.array_shapes( min_dims=min_dims, max_dims=max_dims, min_side=min_side, max_side=max_side, ), dtype=dtype, ) ) x = np.zeros(shape) out = x[index] # raises if the index is invalid assert all(min_side <= s <= max_side for s in out.shape) assert min_dims <= out.ndim <= max_dims assert not np.shares_memory(x, out) # advanced indexing should not return a view assert all(dtype == x.dtype for x in index) @settings(deadline=None) @given( shape=nps.array_shapes(min_dims=1, min_side=1), min_dims=st.integers(0, 3), min_side=st.integers(0, 3), dtype=st.sampled_from(["uint8", "int8"]), data=st.data(), ) def test_advanced_integer_index_minimizes_as_documented( shape, min_dims, min_side, dtype, data ): max_side = data.draw(st.integers(min_side, min_side + 2), label="max_side") max_dims = data.draw(st.integers(min_dims, min_dims + 2), label="max_dims") result_shape = nps.array_shapes( min_dims=min_dims, max_dims=max_dims, min_side=min_side, max_side=max_side ) smallest = minimal( nps.integer_array_indices(shape, result_shape=result_shape, dtype=dtype) ) desired = len(shape) * (np.zeros(min_dims * [min_side]),) assert len(smallest) == len(desired) for s, d in zip(smallest, desired): 
np.testing.assert_array_equal(s, d) @settings(deadline=None, max_examples=10) @given( shape=nps.array_shapes(min_dims=1, max_dims=2, min_side=1, max_side=3), data=st.data(), ) def test_advanced_integer_index_can_generate_any_pattern(shape, data): # ensures that generated index-arrays can be used to yield any pattern of elements from an array x = np.arange(np.product(shape)).reshape(shape) target = data.draw( nps.arrays( shape=nps.array_shapes(min_dims=1, max_dims=2, min_side=1, max_side=2), elements=st.sampled_from(x.flatten()), dtype=x.dtype, ), label="target", ) find_any( nps.integer_array_indices( shape, result_shape=st.just(target.shape), dtype=np.dtype("int8") ), lambda index: np.all(target == x[index]), settings(max_examples=10 ** 6), ) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/numpy/test_narrow_floats.py000066400000000000000000000037411354103617500320540ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import numpy as np import pytest from hypothesis import given from hypothesis.extra.numpy import arrays, from_dtype, integer_dtypes from hypothesis.strategies import data, floats, integers @pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64]) @pytest.mark.parametrize("low", [-2.0, -1.0, 0.0, 1.0]) @given(data()) def test_bad_float_exclude_min_in_array(dtype, low, data): elements = floats( low, low + 1, exclude_min=True, width=np.dtype(dtype).itemsize * 8 ) x = data.draw(arrays(dtype, shape=(1,), elements=elements), label="x") assert np.all(low < x) @given(floats(width=32)) def test_float32_exactly_representable(x): clipped = np.dtype("float32").type(x) if np.isnan(x): assert np.isnan(clipped) else: assert x == float(clipped) @given(floats(width=16)) def test_float16_exactly_representable(x): clipped = np.dtype("float16").type(x) if np.isnan(x): assert np.isnan(clipped) else: assert x == float(clipped) @given(data=data(), dtype=integer_dtypes()) def test_floor_ceil_lossless(data, dtype): # Regression test for issue #1667; ceil converting numpy integers # to float and back to int with loss of exact value. x = data.draw(from_dtype(dtype)) assert data.draw(integers(x, x)) == x hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/numpy/test_randomness.py000066400000000000000000000022721354103617500313430ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import numpy as np from hypothesis import given from hypothesis.strategies import none def test_numpy_prng_is_seeded(): first = [] prng_state = np.random.get_state() @given(none()) def inner(_): val = np.random.bytes(10) if not first: first.append(val) assert val == first[0], "Numpy random module should be reproducible" inner() np.testing.assert_array_equal( np.random.get_state()[1], prng_state[1], "State was not restored." ) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/numpy/test_sampled_from.py000066400000000000000000000025371354103617500316460ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given from hypothesis.errors import InvalidArgument from hypothesis.extra import numpy as npst from hypothesis.strategies import data, sampled_from from tests.common.utils import fails_with @given( data(), npst.arrays(dtype=npst.scalar_dtypes(), shape=npst.array_shapes(max_dims=1)) ) def test_can_sample_1D_numpy_array_without_warning(data, arr): data.draw(sampled_from(arr)) @fails_with(InvalidArgument) @given( data(), npst.arrays( dtype=npst.scalar_dtypes(), shape=npst.array_shapes(min_dims=2, max_dims=5) ), ) def test_sampling_multi_dimensional_arrays_is_deprecated(data, arr): data.draw(sampled_from(arr)) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pandas/000077500000000000000000000000001354103617500256545ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pandas/__init__.py000066400000000000000000000012751354103617500277720ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pandas/helpers.py000066400000000000000000000033421354103617500276720ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import numpy as np PANDAS_TIME_DTYPES = tuple(map(np.dtype, ["M8[ns]", ">m8[ns]"])) def supported_by_pandas(dtype): """Checks whether the dtype is one that can be correctly handled by Pandas.""" # Pandas does not support non-native byte orders and things go amusingly # wrong in weird places if you try to use them. See # https://pandas.pydata.org/pandas-docs/stable/gotchas.html#byte-ordering-issues if dtype.byteorder not in ("|", "="): return False # Pandas only supports a limited range of timedelta and datetime dtypes # compared to the full range that numpy supports and will convert # everything to those types (possibly increasing precision in the course of # doing so, which can cause problems if this results in something which # does not fit into the desired word type. As a result we want to filter # out any timedelta or datetime dtypes that are not of the desired types. 
if dtype.kind in ("m", "M"): return dtype in PANDAS_TIME_DTYPES return True hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pandas/test_argument_validation.py000066400000000000000000000057221354103617500333270ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.extra.pandas as pdst import hypothesis.strategies as st from tests.common.arguments import argument_validation_test, e BAD_ARGS = [ e(pdst.data_frames), e(pdst.data_frames, pdst.columns(1, dtype="not a dtype")), e(pdst.data_frames, pdst.columns(1, elements="not a strategy")), e(pdst.data_frames, pdst.columns([[]])), e(pdst.data_frames, [], index=[]), e(pdst.data_frames, [], rows=st.fixed_dictionaries({"A": st.just(1)})), e(pdst.data_frames, pdst.columns(1)), e(pdst.data_frames, pdst.columns(1, dtype=float, fill=1)), e(pdst.data_frames, pdst.columns(1, dtype=float, elements=1)), e(pdst.data_frames, pdst.columns(1, fill=1, dtype=float)), e(pdst.data_frames, pdst.columns(["A", "A"], dtype=float)), e(pdst.data_frames, pdst.columns(1, elements=st.none(), dtype=int)), e(pdst.data_frames, 1), e(pdst.data_frames, [1]), e(pdst.data_frames, pdst.columns(1, dtype="category")), e( pdst.data_frames, pdst.columns(["A"], dtype=bool), rows=st.tuples(st.booleans(), st.booleans()), ), e( pdst.data_frames, pdst.columns(1, 
elements=st.booleans()), rows=st.tuples(st.booleans()), ), e(pdst.data_frames, rows=st.integers(), index=pdst.range_indexes(0, 0)), e(pdst.data_frames, rows=st.integers(), index=pdst.range_indexes(1, 1)), e(pdst.data_frames, pdst.columns(1, dtype=int), rows=st.integers()), e(pdst.indexes), e(pdst.indexes, dtype="category"), e(pdst.indexes, dtype="not a dtype"), e(pdst.indexes, elements="not a strategy"), e(pdst.indexes, elements=st.text(), dtype=float), e(pdst.indexes, elements=st.none(), dtype=int), e(pdst.indexes, dtype=int, max_size=0, min_size=1), e(pdst.indexes, dtype=int, unique="true"), e(pdst.indexes, dtype=int, min_size="0"), e(pdst.indexes, dtype=int, max_size="1"), e(pdst.range_indexes, 1, 0), e(pdst.range_indexes, min_size="0"), e(pdst.range_indexes, max_size="1"), e(pdst.series), e(pdst.series, dtype="not a dtype"), e(pdst.series, elements="not a strategy"), e(pdst.series, elements=st.none(), dtype=int), e(pdst.series, dtype="category"), e(pdst.series, index="not a strategy"), ] test_raise_invalid_argument = argument_validation_test(BAD_ARGS) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pandas/test_data_frame.py000066400000000000000000000155071354103617500313600ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import numpy as np import hypothesis.extra.numpy as npst import hypothesis.extra.pandas as pdst import hypothesis.strategies as st from hypothesis import HealthCheck, given, reject, settings from tests.common.debug import find_any from tests.pandas.helpers import supported_by_pandas @given(pdst.data_frames([pdst.column("a", dtype=int), pdst.column("b", dtype=float)])) def test_can_have_columns_of_distinct_types(df): assert df["a"].dtype == np.dtype(int) assert df["b"].dtype == np.dtype(float) @given( pdst.data_frames( [pdst.column(dtype=int)], index=pdst.range_indexes(min_size=1, max_size=5) ) ) def test_respects_size_bounds(df): assert 1 <= len(df) <= 5 @given(pdst.data_frames(pdst.columns(["A", "B"], dtype=float))) def test_can_specify_just_column_names(df): df["A"] df["B"] @given(pdst.data_frames(pdst.columns(2, dtype=float))) def test_can_specify_just_column_count(df): df[0] df[1] @given( pdst.data_frames( rows=st.fixed_dictionaries({"A": st.integers(1, 10), "B": st.floats()}) ) ) def test_gets_the_correct_data_shape_for_just_rows(table): assert table["A"].dtype == np.dtype("int64") assert table["B"].dtype == np.dtype(float) @given( pdst.data_frames( columns=pdst.columns(["A", "B"], dtype=int), rows=st.lists(st.integers(0, 1000), min_size=2, max_size=2).map(sorted), ) ) def test_can_specify_both_rows_and_columns_list(d): assert d["A"].dtype == np.dtype(int) assert d["B"].dtype == np.dtype(int) for _, r in d.iterrows(): assert r["A"] <= r["B"] @given( pdst.data_frames( columns=pdst.columns(["A", "B"], dtype=int), rows=st.lists(st.integers(0, 1000), min_size=2, max_size=2) .map(sorted) .map(tuple), ) ) def test_can_specify_both_rows_and_columns_tuple(d): assert d["A"].dtype == np.dtype(int) assert d["B"].dtype == np.dtype(int) for _, r in d.iterrows(): assert r["A"] <= r["B"] @given( pdst.data_frames( columns=pdst.columns(["A", "B"], dtype=int), rows=st.lists(st.integers(0, 1000), 
min_size=2, max_size=2).map( lambda x: {"A": min(x), "B": max(x)} ), ) ) def test_can_specify_both_rows_and_columns_dict(d): assert d["A"].dtype == np.dtype(int) assert d["B"].dtype == np.dtype(int) for _, r in d.iterrows(): assert r["A"] <= r["B"] @given( pdst.data_frames( [ pdst.column( "A", fill=st.just(float("nan")), dtype=float, elements=st.floats(allow_nan=False), ) ], rows=st.builds(dict), ) ) def test_can_fill_in_missing_elements_from_dict(df): assert np.isnan(df["A"]).all() @st.composite def column_strategy(draw): name = draw(st.none() | st.text()) dtype = draw(npst.scalar_dtypes().filter(supported_by_pandas)) pass_dtype = not draw(st.booleans()) if pass_dtype: pass_elements = not draw(st.booleans()) else: pass_elements = True if pass_elements: elements = npst.from_dtype(dtype) else: elements = None unique = draw(st.booleans()) fill = st.nothing() if draw(st.booleans()) else None return pdst.column( name=name, dtype=dtype, unique=unique, fill=fill, elements=elements ) @given(pdst.data_frames(pdst.columns(1, dtype=np.dtype("' def test_destructuring_not_allowed(): @given(integers()) def foo(a, (b, c)): pass with pytest.raises(InvalidArgument): foo() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py2/test_from_type.py000066400000000000000000000020651354103617500305400ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import hypothesis.strategies as st from hypothesis import given @given(st.from_type(int)) def test_from_int_is_int(x): assert isinstance(x, int) @given(st.from_type(long)) def test_from_long_is_long(x): assert isinstance(x, long) class OldStyleInitlessClass: pass def test_builds_old_style_initless_class(): st.builds(OldStyleInitlessClass).example() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py2/test_generate_ints.py000066400000000000000000000017371354103617500313700ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from tests.common.debug import minimal @pytest.mark.parametrize("kwargs", [{}, {"min_value": 0}, {"max_value": 0}]) def test_generates_an_int_as_the_min_value(kwargs): n = minimal(st.integers(**kwargs)) assert isinstance(n, int) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py2/test_validation.py000066400000000000000000000020231354103617500306600ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis.errors import InvalidArgument from hypothesis.internal.validation import check_type class OldStyleClass: pass def test_check_type_works_for_old_style_classes(): check_type(OldStyleClass, OldStyleClass()) with pytest.raises(InvalidArgument): check_type(OldStyleClass, "not an instance") hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py3/000077500000000000000000000000001354103617500251215ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py3/__init__.py000066400000000000000000000012751354103617500272370ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py3/test_annotations.py000066400000000000000000000077221354103617500310770ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import sys import attr import pytest import hypothesis.strategies as st from hypothesis import given from hypothesis.errors import InvalidArgument from hypothesis.internal.compat import getfullargspec from hypothesis.internal.reflection import ( convert_positional_arguments, define_function_signature, get_pretty_function_description, ) @given(st.integers()) def test_has_an_annotation(i: int): pass def universal_acceptor(*args, **kwargs): return args, kwargs def has_annotation(a: int, *b, c=2) -> None: pass @pytest.mark.parametrize("f", [has_annotation, lambda *, a: a, lambda *, a=1: a]) def test_copying_preserves_argspec(f): af = getfullargspec(f) t = define_function_signature("foo", "docstring", af)(universal_acceptor) at = getfullargspec(t) assert af.args == at.args[: len(af.args)] assert af.varargs == at.varargs assert af.varkw == at.varkw assert len(af.defaults or ()) == len(at.defaults or ()) assert af.kwonlyargs == at.kwonlyargs assert af.kwonlydefaults == at.kwonlydefaults assert af.annotations == at.annotations 
@pytest.mark.parametrize( "lam,source", [ ((lambda *z, a: a), "lambda *z, a: a"), ((lambda *z, a=1: a), "lambda *z, a=1: a"), ((lambda *, a: a), "lambda *, a: a"), ((lambda *, a=1: a), "lambda *, a=1: a"), ], ) def test_py3only_lambda_formatting(lam, source): # Testing kwonly lambdas, with and without varargs and default values assert get_pretty_function_description(lam) == source def test_given_notices_missing_kwonly_args(): with pytest.raises(InvalidArgument): @given(a=st.none()) def reqs_kwonly(*, a, b): pass def test_converter_handles_kwonly_args(): def f(*, a, b=2): pass out = convert_positional_arguments(f, (), dict(a=1)) assert out == ((), dict(a=1, b=2)) def test_converter_notices_missing_kwonly_args(): def f(*, a, b=2): pass with pytest.raises(TypeError): assert convert_positional_arguments(f, (), dict()) def pointless_composite(draw: None, strat: bool, nothing: list) -> int: return 3 def return_annot() -> int: return 4 # per RFC 1149.5 / xckd 221 def first_annot(draw: None): pass def test_composite_edits_annotations(): spec_comp = getfullargspec(st.composite(pointless_composite)) assert spec_comp.annotations["return"] == int assert "nothing" in spec_comp.annotations assert "draw" not in spec_comp.annotations @pytest.mark.parametrize("nargs", [1, 2, 3]) def test_given_edits_annotations(nargs): spec_given = getfullargspec(given(*(nargs * [st.none()]))(pointless_composite)) assert spec_given.annotations.pop("return") is None assert len(spec_given.annotations) == 3 - nargs def a_converter(x) -> int: return int(x) @attr.s class Inferrables(object): annot_converter = attr.ib(converter=a_converter) @pytest.mark.skipif( sys.version_info[:2] <= (3, 5), reason="Too-old typing module can't get return value hint", ) @given(st.builds(Inferrables)) def test_attrs_inference_builds(c): pass @pytest.mark.skipif( sys.version_info[:2] <= (3, 5), reason="Too-old typing module can't get return value hint", ) @given(st.from_type(Inferrables)) def 
test_attrs_inference_from_type(c): pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py3/test_asyncio.py000066400000000000000000000034701354103617500302030ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import asyncio import unittest from unittest import TestCase import pytest import hypothesis.strategies as st from hypothesis import assume, given from hypothesis.internal.compat import PYPY class TestAsyncio(TestCase): timeout = 5 def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) def tearDown(self): self.loop.close() def execute_example(self, f): error = None def g(): nonlocal error try: x = f() if x is not None: yield from x except BaseException as e: error = e coro = asyncio.coroutine(g) future = asyncio.wait_for(coro(), timeout=self.timeout) self.loop.run_until_complete(future) if error is not None: raise error @pytest.mark.skipif(PYPY, reason="Error in asyncio.new_event_loop()") @given(st.text()) @asyncio.coroutine def test_foo(self, x): assume(x) yield from asyncio.sleep(0.001) assert x if __name__ == "__main__": unittest.main() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py3/test_composite_kwonlyargs.py000066400000000000000000000023601354103617500330150ustar00rootroot00000000000000# coding=utf-8 # # This file is part of 
Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given, strategies as st # Tests that convert_keyword_arguments in reflection.py can handle # composites that only have keyword-only arguments. # See https://github.com/HypothesisWorks/hypothesis/issues/1999 @st.composite def kwonlyargs_composites(draw, *, kwarg1=None): return draw(st.fixed_dictionaries({"kwarg1": st.just(kwarg1), "i": st.integers()})) @given( st.lists( st.one_of(kwonlyargs_composites(kwarg1="test")), unique_by=lambda x: x["i"] ) ) def test_composite_with_keyword_only_args(a): assert True hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py3/test_functions.py000066400000000000000000000017571354103617500305540ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import given from hypothesis.strategies import functions def func(arg, *, kwonly_arg): pass @given(functions(func)) def test_functions_strategy_with_kwonly_args(f): with pytest.raises(TypeError): f(1, 2) f(1, kwonly_arg=2) f(kwonly_arg=2, arg=1) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py3/test_lookup.py000066400000000000000000000363061354103617500300530ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import abc import collections import enum import io import string import sys import pytest import hypothesis.strategies as st from hypothesis import HealthCheck, assume, given, infer, settings from hypothesis.errors import InvalidArgument, ResolutionFailed, Unsatisfiable from hypothesis.internal.compat import ( ForwardRef, get_type_hints, integer_types, typing_root_type, ) from hypothesis.searchstrategy import types from hypothesis.strategies import from_type from tests.common.debug import minimal from tests.common.utils import fails_with typing = pytest.importorskip("typing") sentinel = object() generics = sorted( (t for t in types._global_type_lookup if isinstance(t, typing_root_type)), key=str ) @pytest.mark.parametrize("typ", generics) def test_resolve_typing_module(typ): @settings(suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much]) @given(from_type(typ)) def inner(ex): if typ in (typing.BinaryIO, typing.TextIO): assert isinstance(ex, io.IOBase) elif typ is typing.Tuple: # isinstance is incompatible with Tuple on early 3.5 assert ex == () elif isinstance(typ, typing._ProtocolMeta): pass elif typ is typing.Type and not isinstance(typing.Type, type): assert isinstance(ex, typing.TypeVar) else: try: assert isinstance(ex, typ) except TypeError: if sys.version_info[:2] < (3, 6): pytest.skip() raise inner() @pytest.mark.parametrize("typ", [typing.Any, typing.Union]) def test_does_not_resolve_special_cases(typ): with pytest.raises(InvalidArgument): from_type(typ).example() @pytest.mark.parametrize( "typ,instance_of", [(typing.Union[int, str], (int, str)), (typing.Optional[int], (int, type(None)))], ) def test_specialised_scalar_types(typ, instance_of): @given(from_type(typ)) def inner(ex): assert isinstance(ex, instance_of) inner() @pytest.mark.skipif(not hasattr(typing, "Type"), reason="requires this attr") def test_typing_Type_int(): assert 
from_type(typing.Type[int]).example() is int @pytest.mark.skipif(not hasattr(typing, "Type"), reason="requires this attr") def test_typing_Type_Union(): @given(from_type(typing.Type[typing.Union[str, list]])) def inner(ex): assert ex in (str, list) inner() @pytest.mark.parametrize( "typ,coll_type,instance_of", [ (typing.Set[int], set, int), (typing.FrozenSet[int], frozenset, int), (typing.Dict[int, int], dict, int), (typing.KeysView[int], type({}.keys()), int), (typing.ValuesView[int], type({}.values()), int), (typing.List[int], list, int), (typing.Tuple[int], tuple, int), (typing.Tuple[int, ...], tuple, int), (typing.Iterator[int], typing.Iterator, int), (typing.Sequence[int], typing.Sequence, int), (typing.Iterable[int], typing.Iterable, int), (typing.Mapping[int, None], typing.Mapping, int), (typing.Container[int], typing.Container, int), (typing.NamedTuple("A_NamedTuple", (("elem", int),)), tuple, int), ], ) def test_specialised_collection_types(typ, coll_type, instance_of): @given(from_type(typ)) def inner(ex): if sys.version_info[:2] >= (3, 6): assume(ex) assert isinstance(ex, coll_type) assert all(isinstance(elem, instance_of) for elem in ex) try: inner() except (ResolutionFailed, AssertionError): if sys.version_info[:2] < (3, 6): pytest.skip("Hard-to-reproduce bug (early version of typing?)") raise @pytest.mark.skipif(sys.version_info[:2] < (3, 6), reason="new addition") def test_36_specialised_collection_types(): @given(from_type(typing.DefaultDict[int, int])) def inner(ex): if sys.version_info[:2] >= (3, 6): assume(ex) assert isinstance(ex, collections.defaultdict) assert all(isinstance(elem, int) for elem in ex) assert all(isinstance(elem, int) for elem in ex.values()) inner() @pytest.mark.skipif(sys.version_info[:3] <= (3, 5, 1), reason="broken") def test_ItemsView(): @given(from_type(typing.ItemsView[int, int])) def inner(ex): # See https://github.com/python/typing/issues/177 if sys.version_info[:2] >= (3, 6): assume(ex) assert isinstance(ex, 
type({}.items())) assert all(isinstance(elem, tuple) and len(elem) == 2 for elem in ex) assert all(all(isinstance(e, int) for e in elem) for elem in ex) inner() def test_Optional_minimises_to_None(): assert minimal(from_type(typing.Optional[int]), lambda ex: True) is None @pytest.mark.parametrize("n", range(10)) def test_variable_length_tuples(n): type_ = typing.Tuple[int, ...] try: from_type(type_).filter(lambda ex: len(ex) == n).example() except Unsatisfiable: if sys.version_info[:2] < (3, 6): pytest.skip() raise @pytest.mark.skipif(sys.version_info[:3] <= (3, 5, 1), reason="broken") def test_lookup_overrides_defaults(): sentinel = object() try: st.register_type_strategy(int, st.just(sentinel)) @given(from_type(typing.List[int])) def inner_1(ex): assert all(elem is sentinel for elem in ex) inner_1() finally: st.register_type_strategy(int, st.integers()) st.from_type.__clear_cache() @given(from_type(typing.List[int])) def inner_2(ex): assert all(isinstance(elem, int) for elem in ex) inner_2() def test_register_generic_typing_strats(): # I don't expect anyone to do this, but good to check it works as expected try: # We register sets for the abstract sequence type, which masks subtypes # from supertype resolution but not direct resolution st.register_type_strategy( typing.Sequence, types._global_type_lookup[typing.Set] ) @given(from_type(typing.Sequence[int])) def inner_1(ex): assert isinstance(ex, set) @given(from_type(typing.Container[int])) def inner_2(ex): assert not isinstance(ex, typing.Sequence) @given(from_type(typing.List[int])) def inner_3(ex): assert isinstance(ex, list) inner_1() inner_2() inner_3() finally: types._global_type_lookup.pop(typing.Sequence) st.from_type.__clear_cache() @pytest.mark.parametrize( "typ", [ typing.Sequence, typing.Container, typing.Mapping, typing.Reversible, typing.SupportsBytes, typing.SupportsAbs, typing.SupportsComplex, typing.SupportsFloat, typing.SupportsInt, typing.SupportsRound, ], ) def test_resolves_weird_types(typ): 
from_type(typ).example() @pytest.mark.parametrize( "var,expected", [ (typing.TypeVar("V"), object), (typing.TypeVar("V", bound=int), int), (typing.TypeVar("V", int, str), (int, str)), ], ) @given(data=st.data()) def test_typevar_type_is_consistent(data, var, expected): strat = st.from_type(var) v1 = data.draw(strat) v2 = data.draw(strat) assume(v1 != v2) # Values may vary, just not types assert type(v1) == type(v2) assert isinstance(v1, expected) def annotated_func(a: int, b: int = 2, *, c: int, d: int = 4): return a + b + c + d def test_issue_946_regression(): # Turned type hints into kwargs even if the required posarg was passed st.builds(annotated_func, st.integers()).example() @pytest.mark.parametrize( "thing", [ annotated_func, # Works via typing.get_type_hints typing.NamedTuple("N", [("a", int)]), # Falls back to inspection int, # Fails; returns empty dict ], ) def test_can_get_type_hints(thing): assert isinstance(get_type_hints(thing), dict) def test_force_builds_to_infer_strategies_for_default_args(): # By default, leaves args with defaults and minimises to 2+4=6 assert minimal(st.builds(annotated_func), lambda ex: True) == 6 # Inferring integers() for args makes it minimise to zero assert minimal(st.builds(annotated_func, b=infer, d=infer), lambda ex: True) == 0 def non_annotated_func(a, b=2, *, c, d=4): pass def test_cannot_pass_infer_as_posarg(): with pytest.raises(InvalidArgument): st.builds(annotated_func, infer).example() def test_cannot_force_inference_for_unannotated_arg(): with pytest.raises(InvalidArgument): st.builds(non_annotated_func, a=infer, c=st.none()).example() with pytest.raises(InvalidArgument): st.builds(non_annotated_func, a=st.none(), c=infer).example() class UnknownType(object): def __init__(self, arg): pass class UnknownAnnotatedType(object): def __init__(self, arg: int): pass @given(st.from_type(UnknownAnnotatedType)) def test_builds_for_unknown_annotated_type(ex): assert isinstance(ex, UnknownAnnotatedType) def 
unknown_annotated_func(a: UnknownType, b=2, *, c: UnknownType, d=4): pass def test_raises_for_arg_with_unresolvable_annotation(): with pytest.raises(ResolutionFailed): st.builds(unknown_annotated_func).example() with pytest.raises(ResolutionFailed): st.builds(unknown_annotated_func, a=st.none(), c=infer).example() @given(a=infer, b=infer) def test_can_use_type_hints(a: int, b: float): assert isinstance(a, int) and isinstance(b, float) def test_error_if_has_unresolvable_hints(): @given(a=infer) def inner(a: UnknownType): pass with pytest.raises(InvalidArgument): inner() @pytest.mark.skipif(not hasattr(typing, "NewType"), reason="test for NewType") def test_resolves_NewType(): typ = typing.NewType("T", int) nested = typing.NewType("NestedT", typ) uni = typing.NewType("UnionT", typing.Optional[int]) assert isinstance(from_type(typ).example(), integer_types) assert isinstance(from_type(nested).example(), integer_types) assert isinstance(from_type(uni).example(), integer_types + (type(None),)) E = enum.Enum("E", "a b c") @given(from_type(E)) def test_resolves_enum(ex): assert isinstance(ex, E) @pytest.mark.skipif(not hasattr(enum, "Flag"), reason="test for Flag") @pytest.mark.parametrize("resolver", [from_type, st.sampled_from]) def test_resolves_flag_enum(resolver): # Storing all combinations takes O(2^n) memory. Using an enum of 52 # members in this test ensures that we won't try! 
F = enum.Flag("F", " ".join(string.ascii_letters)) # Filter to check that we can generate compound members of enum.Flags @given(resolver(F).filter(lambda ex: ex not in tuple(F))) def inner(ex): assert isinstance(ex, F) inner() class AnnotatedTarget(object): def __init__(self, a: int, b: int): pass def method(self, a: int, b: int): pass @pytest.mark.parametrize("target", [AnnotatedTarget, AnnotatedTarget(1, 2).method]) @pytest.mark.parametrize( "args,kwargs", [ ((), {}), ((1,), {}), ((1, 2), {}), ((), dict(a=1)), ((), dict(b=2)), ((), dict(a=1, b=2)), ], ) def test_required_args(target, args, kwargs): # Mostly checking that `self` (and only self) is correctly excluded st.builds( target, *map(st.just, args), **{k: st.just(v) for k, v in kwargs.items()} ).example() AnnotatedNamedTuple = typing.NamedTuple("AnnotatedNamedTuple", [("a", str)]) @given(st.builds(AnnotatedNamedTuple)) def test_infers_args_for_namedtuple_builds(thing): assert isinstance(thing.a, str) @given(st.from_type(AnnotatedNamedTuple)) def test_infers_args_for_namedtuple_from_type(thing): assert isinstance(thing.a, str) @given(st.builds(AnnotatedNamedTuple, a=st.none())) def test_override_args_for_namedtuple(thing): assert thing.a is None @pytest.mark.parametrize( "thing", [typing.Optional, typing.List, getattr(typing, "Type", typing.Set)] ) # check Type if it's available, otherwise Set is redundant but harmless def test_cannot_resolve_bare_forward_reference(thing): with pytest.raises(InvalidArgument): t = thing["int"] if type(getattr(t, "__args__", [None])[0]) != ForwardRef: assert sys.version_info[:2] == (3, 5) pytest.xfail("python 3.5 typing module is really weird") st.from_type(t).example() class Tree: def __init__(self, left: typing.Optional["Tree"], right: typing.Optional["Tree"]): self.left = left self.right = right def __repr__(self): return "Tree({}, {})".format(self.left, self.right) def test_resolving_recursive_type(): try: assert isinstance(st.builds(Tree).example(), Tree) except 
ResolutionFailed: assert sys.version_info[:2] == (3, 5) pytest.xfail("python 3.5 typing module may not resolve annotations") except TypeError: # TypeError raised if typing.get_type_hints(Tree.__init__) fails; see # https://github.com/HypothesisWorks/hypothesis-python/issues/1074 assert sys.version_info[:2] == (3, 5) pytest.skip("Could not find type hints to resolve") @given(from_type(typing.Tuple[()])) def test_resolves_empty_Tuple_issue_1583_regression(ex): # See e.g. https://github.com/python/mypy/commit/71332d58 assert ex == () def test_can_register_NewType(): Name = typing.NewType("Name", str) st.register_type_strategy(Name, st.just("Eric Idle")) assert st.from_type(Name).example() == "Eric Idle" @given(st.from_type(typing.Callable)) def test_resolves_bare_callable_to_function(f): val = f() assert val is None with pytest.raises(TypeError): f(1) @given(st.from_type(typing.Callable[[str], int])) def test_resolves_callable_with_arg_to_function(f): val = f("1") assert isinstance(val, int) @given(st.from_type(typing.Callable[..., int])) def test_resolves_ellipses_callable_to_function(f): val = f() assert isinstance(val, int) f(1) f(1, 2, 3) f(accepts_kwargs_too=1) class AbstractFoo(abc.ABC): @abc.abstractmethod def foo(self): pass class ConcreteFoo(AbstractFoo): def foo(self): pass @given(st.from_type(AbstractFoo)) def test_can_resolve_abstract_class(instance): assert isinstance(instance, ConcreteFoo) instance.foo() class AbstractBar(abc.ABC): @abc.abstractmethod def bar(self): pass @fails_with(ResolutionFailed) @given(st.from_type(AbstractBar)) def test_cannot_resolve_abstract_class_with_no_concrete_subclass(instance): assert False, "test body unreachable as strategy cannot resolve" hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py3/test_mock.py000066400000000000000000000033511354103617500274650ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most 
of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """Checks that @given, @mock.patch, and pytest fixtures work as expected.""" from __future__ import absolute_import, division, print_function import math from unittest import mock from _pytest.capture import CaptureFixture import hypothesis.strategies as st from hypothesis import given @given(thing=st.text()) @mock.patch("math.atan") def test_can_mock_inside_given_without_fixture(atan, thing): assert isinstance(atan, mock.MagicMock) assert isinstance(math.atan, mock.MagicMock) @mock.patch("math.atan") @given(thing=st.text()) def test_can_mock_outside_given_with_fixture(atan, capsys, thing): assert isinstance(atan, mock.MagicMock) assert isinstance(math.atan, mock.MagicMock) assert isinstance(capsys, CaptureFixture) @given(thing=st.text()) def test_can_mock_within_test_with_fixture(capsys, thing): assert isinstance(capsys, CaptureFixture) assert not isinstance(math.atan, mock.MagicMock) with mock.patch("math.atan") as atan: assert isinstance(atan, mock.MagicMock) assert isinstance(math.atan, mock.MagicMock) assert not isinstance(math.atan, mock.MagicMock) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py3/test_traceback_elision.py000066400000000000000000000026701354103617500322000ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import traceback import pytest import hypothesis.strategies as st from hypothesis import Verbosity, given, settings @pytest.mark.parametrize("verbosity", [Verbosity.normal, Verbosity.debug]) def test_tracebacks_omit_hypothesis_internals(verbosity): @settings(verbosity=verbosity) @given(st.just(False)) def simplest_failure(x): raise ValueError() try: simplest_failure() except ValueError as e: tb = traceback.extract_tb(e.__traceback__) # Unless in debug mode, Hypothesis adds 1 frame - the least possible! # (4 frames: this one, simplest_failure, internal frame, assert False) if verbosity < Verbosity.debug: assert len(tb) == 4 else: assert len(tb) >= 5 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/py3/test_unicode_identifiers.py000066400000000000000000000030301354103617500325410ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given, strategies as st from hypothesis.internal.reflection import get_pretty_function_description, proxies def test_can_copy_argspec_of_unicode_args(): def foo(μ): return μ @proxies(foo) def bar(μ): return foo(μ) assert bar(1) == 1 def test_can_copy_argspec_of_unicode_name(): def ā(): return 1 @proxies(ā) def bar(): return 2 assert bar() == 2 is_approx_π = lambda x: x == 3.1415 # noqa: E731 def test_can_handle_unicode_identifier_in_same_line_as_lambda_def(): assert get_pretty_function_description(is_approx_π) == "lambda x: x == 3.1415" def test_regression_issue_1700(): π = 3.1415 @given(st.floats(min_value=-π, max_value=π).filter(lambda x: abs(x) > 1e-5)) def test_nonzero(x): assert x != 0 test_nonzero() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/000077500000000000000000000000001354103617500257365ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_capture.py000066400000000000000000000101441354103617500310120ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis.internal.compat import PY2, WINDOWS, escape_unicode_characters, hunichr pytest_plugins = str("pytester") TESTSUITE = """ from hypothesis import given, settings, Verbosity from hypothesis.strategies import integers @settings(verbosity=Verbosity.verbose) @given(integers()) def test_should_be_verbose(x): pass """ @pytest.mark.parametrize("capture,expected", [("no", True), ("fd", False)]) def test_output_without_capture(testdir, capture, expected): script = testdir.makepyfile(TESTSUITE) result = testdir.runpytest(script, "--verbose", "--capture", capture) out = "\n".join(result.stdout.lines) assert "test_should_be_verbose" in out assert ("Trying example" in out) == expected assert result.ret == 0 UNICODE_EMITTING = """ import pytest from hypothesis import given, settings, Verbosity from hypothesis.strategies import text from hypothesis.internal.compat import PY3 import sys def test_emits_unicode(): @settings(verbosity=Verbosity.verbose) @given(text()) def test_should_emit_unicode(t): assert all(ord(c) <= 1000 for c in t) with pytest.raises(AssertionError): test_should_emit_unicode() """ @pytest.mark.xfail( WINDOWS, reason=("Encoding issues in running the subprocess, possibly pytest's fault"), ) @pytest.mark.skipif(PY2, reason="Output streams don't have encodings in python 2") def test_output_emitting_unicode(testdir, monkeypatch): monkeypatch.setenv("LC_ALL", "C") monkeypatch.setenv("LANG", "C") script = testdir.makepyfile(UNICODE_EMITTING) result = getattr(testdir, "runpytest_subprocess", testdir.runpytest)( script, "--verbose", "--capture=no" ) out = "\n".join(result.stdout.lines) assert "test_emits_unicode" in out assert hunichr(1001) in out or escape_unicode_characters(hunichr(1001)) in out assert result.ret == 0 def get_line_num(token, result, skip_n=0): skipped = 0 for i, line in enumerate(result.stdout.lines): if token in line: if skip_n == skipped: 
return i else: skipped += 1 assert False, "Token %r not found (skipped %r of planned %r skips)" % ( token, skipped, skip_n, ) TRACEBACKHIDE_HEALTHCHECK = """ from hypothesis import given, settings from hypothesis.strategies import integers import time @given(integers().map(lambda x: time.sleep(0.2))) def test_healthcheck_traceback_is_hidden(x): pass """ def test_healthcheck_traceback_is_hidden(testdir): script = testdir.makepyfile(TRACEBACKHIDE_HEALTHCHECK) result = testdir.runpytest(script, "--verbose") def_token = "__ test_healthcheck_traceback_is_hidden __" timeout_token = ": FailedHealthCheck" def_line = get_line_num(def_token, result) timeout_line = get_line_num(timeout_token, result) assert timeout_line - def_line == 6 COMPOSITE_IS_NOT_A_TEST = """ from hypothesis.strategies import composite @composite def test_data_factory(draw): assert False, 'Unreachable due to lazy construction' """ @pytest.mark.skipif(pytest.__version__[:3] == "3.0", reason="very very old") def test_deprecation_of_strategies_as_tests(testdir): script = testdir.makepyfile(COMPOSITE_IS_NOT_A_TEST) testdir.runpytest(script, "-Werror").assert_outcomes(failed=1) result = testdir.runpytest(script) result.assert_outcomes(passed=1) result.stdout.fnmatch_lines(["*HypothesisDeprecationWarning*"]) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_compat.py000066400000000000000000000016061354103617500306350ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import given from hypothesis.strategies import booleans @given(booleans()) @pytest.mark.parametrize("hi", (1, 2, 3)) def test_parametrize_after_given(hi, i): pass hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_doctest.py000066400000000000000000000017411354103617500310170ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function pytest_plugins = "pytester" func_with_doctest = """ def hi(): ''' >>> i = 5 >>> i-1 4 ''' """ def test_can_run_doctests(testdir): script = testdir.makepyfile(func_with_doctest) result = testdir.runpytest(script, "--doctest-modules") assert result.ret == 0 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_fixtures.py000066400000000000000000000043511354103617500312230ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesis import example, given from hypothesis.strategies import integers from tests.common.utils import fails try: from unittest.mock import Mock, create_autospec except ImportError: from mock import Mock, create_autospec @pytest.fixture def infinity(): return float("inf") @pytest.fixture def mock_fixture(): return Mock() @pytest.fixture def spec_fixture(): class Foo: def __init__(self): pass def bar(self): return "baz" return create_autospec(Foo) @given(integers()) def test_can_mix_fixture_and_positional_strategy(infinity, xs): # Hypothesis fills arguments from the right, so if @given() uses # positional arguments then any strategies need to be on the right. assert xs <= infinity @given(xs=integers()) def test_can_mix_fixture_and_keyword_strategy(xs, infinity): assert xs <= infinity @example(xs=0) @given(xs=integers()) def test_can_mix_fixture_example_and_keyword_strategy(xs, infinity): assert xs <= infinity @fails @given(integers()) def test_can_inject_mock_via_fixture(mock_fixture, xs): """A negative test is better for this one - this condition uncovers a bug whereby the mock fixture is executed instead of the test body and always succeeds. If this test fails, then we know we've run the test body instead of the mock. 
""" assert False @given(integers()) def test_can_inject_autospecced_mock_via_fixture(spec_fixture, xs): spec_fixture.bar.return_value = float("inf") assert xs <= spec_fixture.bar() hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_mark.py000066400000000000000000000032201354103617500302760ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function pytest_plugins = str("pytester") TESTSUITE = """ from hypothesis import given from hypothesis.strategies import integers @given(integers()) def test_foo(x): pass def test_bar(): pass """ def test_can_select_mark(testdir): script = testdir.makepyfile(TESTSUITE) result = testdir.runpytest(script, "--verbose", "--strict", "-m", "hypothesis") out = "\n".join(result.stdout.lines) assert "1 passed, 1 deselected" in out UNITTEST_TESTSUITE = """ from hypothesis import given from hypothesis.strategies import integers from unittest import TestCase class TestStuff(TestCase): @given(integers()) def test_foo(self, x): pass def test_bar(self): pass """ def test_can_select_mark_on_unittest(testdir): script = testdir.makepyfile(UNITTEST_TESTSUITE) result = testdir.runpytest(script, "--verbose", "--strict", "-m", "hypothesis") out = "\n".join(result.stdout.lines) assert "1 passed, 1 deselected" in out 
hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_profiles.py000066400000000000000000000027201354103617500311730ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis.extra.pytestplugin import LOAD_PROFILE_OPTION from hypothesis.version import __version__ pytest_plugins = str("pytester") CONFTEST = """ from hypothesis._settings import settings settings.register_profile("test", settings(max_examples=1)) """ TESTSUITE = """ from hypothesis import given from hypothesis.strategies import integers from hypothesis._settings import settings def test_this_one_is_ok(): assert settings().max_examples == 1 """ def test_runs_reporting_hook(testdir): script = testdir.makepyfile(TESTSUITE) testdir.makeconftest(CONFTEST) result = testdir.runpytest(script, LOAD_PROFILE_OPTION, "test") out = "\n".join(result.stdout.lines) assert "1 passed" in out assert "max_examples=1" in out assert __version__ in out hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_pytest_detection.py000066400000000000000000000022341354103617500327360ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """This module provides the core primitives of Hypothesis, such as given.""" from __future__ import absolute_import, division, print_function import subprocess import sys import hypothesis.core as core def test_is_running_under_pytest(): assert core.running_under_pytest FILE_TO_RUN = """ import hypothesis.core as core assert not core.running_under_pytest """ def test_is_not_running_under_pytest(tmpdir): pyfile = tmpdir.join("test.py") pyfile.write(FILE_TO_RUN) subprocess.check_call([sys.executable, str(pyfile)]) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_reporting.py000066400000000000000000000024021354103617500313560ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function pytest_plugins = str("pytester") TESTSUITE = """ from hypothesis import given from hypothesis.strategies import lists, integers @given(integers()) def test_this_one_is_ok(x): pass @given(lists(integers())) def test_hi(xs): assert False """ def test_runs_reporting_hook(testdir): script = testdir.makepyfile(TESTSUITE) result = testdir.runpytest(script, "--verbose") out = "\n".join(result.stdout.lines) assert "test_this_one_is_ok" in out assert "Captured stdout call" not in out assert "Falsifying example" in out assert result.ret != 0 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_runs.py000066400000000000000000000016751354103617500303470ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import given from hypothesis.strategies import integers from tests.common.utils import fails @given(integers()) def test_ints_are_ints(x): pass @fails @given(integers()) def test_ints_are_floats(x): assert isinstance(x, float) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_seeding.py000066400000000000000000000062661354103617500307770ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import re import pytest from hypothesis.internal.compat import hrange pytest_plugins = str("pytester") TEST_SUITE = """ from hypothesis import given, settings, assume import hypothesis.strategies as st first = None @settings(database=None) @given(st.integers()) def test_fails_once(some_int): assume(abs(some_int) > 10000) global first if first is None: first = some_int assert some_int != first """ CONTAINS_SEED_INSTRUCTION = re.compile(r"--hypothesis-seed=\d+", re.MULTILINE) @pytest.mark.parametrize("seed", [0, 42, "foo"]) def test_runs_repeatably_when_seed_is_set(seed, testdir): script = testdir.makepyfile(TEST_SUITE) results = [ testdir.runpytest( script, "--verbose", "--strict", "--hypothesis-seed", str(seed) ) for _ in hrange(2) ] for r in results: for l in r.stdout.lines: assert "--hypothesis-seed" not in l failure_lines = [l for r in results for l in r.stdout.lines if "some_int=" in l] assert len(failure_lines) == 2 assert failure_lines[0] == failure_lines[1] HEALTH_CHECK_FAILURE = """ import os from hypothesis import given, strategies as st, assume, reject RECORD_EXAMPLES = if os.path.exists(RECORD_EXAMPLES): target = None with open(RECORD_EXAMPLES, 'r') as i: seen = set(map(int, i.read().strip().split("\\n"))) else: target = open(RECORD_EXAMPLES, 'w') @given(st.integers()) def test_failure(i): if target is None: assume(i not in seen) else: target.write("%s\\n" % (i,)) reject() """ def test_repeats_healthcheck_when_following_seed_instruction(testdir, tmpdir): health_check_test = HEALTH_CHECK_FAILURE.replace( "", repr(str(tmpdir.join("seen"))) ) script = testdir.makepyfile(health_check_test) initial = testdir.runpytest(script, "--verbose", "--strict") match = CONTAINS_SEED_INSTRUCTION.search("\n".join(initial.stdout.lines)) initial_output = "\n".join(initial.stdout.lines) match = CONTAINS_SEED_INSTRUCTION.search(initial_output) assert match is not None rerun = 
testdir.runpytest(script, "--verbose", "--strict", match.group(0)) rerun_output = "\n".join(rerun.stdout.lines) assert "FailedHealthCheck" in rerun_output assert "--hypothesis-seed" not in rerun_output rerun2 = testdir.runpytest(script, "--verbose", "--strict", "--hypothesis-seed=10") rerun2_output = "\n".join(rerun2.stdout.lines) assert "FailedHealthCheck" not in rerun2_output hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_skipping.py000066400000000000000000000035021354103617500311730ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest pytest_plugins = str("pytester") PYTEST_TESTSUITE = """ from hypothesis import given from hypothesis.strategies import integers import pytest @given(xs=integers()) def test_to_be_skipped(xs): # We always try the simplest example first, raising a Skipped exception # which we know to propagate immediately... if xs == 0: pytest.skip() # But the pytest 3.0 internals don't have such an exception, so we keep # going and raise a MultipleFailures error. Ah well. 
else: assert xs == 0 """ @pytest.mark.skipif( pytest.__version__.startswith("3.0"), reason="Pytest 3.0 predates a Skipped exception type, so we can't hook into it.", ) def test_no_falsifying_example_if_pytest_skip(testdir): """If ``pytest.skip() is called during a test, Hypothesis should not continue running the test and shrink process, nor should it print anything about falsifying examples.""" script = testdir.makepyfile(PYTEST_TESTSUITE) result = testdir.runpytest(script, "--verbose", "--strict", "-m", "hypothesis") out = "\n".join(result.stdout.lines) assert "Falsifying example" not in out hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/pytest/test_statistics.py000066400000000000000000000065451354103617500315530ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from distutils.version import LooseVersion import pytest from hypothesis.extra.pytestplugin import PRINT_STATISTICS_OPTION pytest_plugins = "pytester" TESTSUITE = """ from hypothesis import HealthCheck, given, settings, assume from hypothesis.strategies import integers import time import warnings from hypothesis.errors import HypothesisDeprecationWarning warnings.simplefilter('always', HypothesisDeprecationWarning) @given(integers()) def test_all_valid(x): pass @settings(max_examples=100, suppress_health_check=HealthCheck.all()) @given(integers()) def test_iterations(x): assume(x == 0) """ def test_does_not_run_statistics_by_default(testdir): script = testdir.makepyfile(TESTSUITE) result = testdir.runpytest(script) out = "\n".join(result.stdout.lines) assert "Hypothesis Statistics" not in out def test_prints_statistics_given_option(testdir): script = testdir.makepyfile(TESTSUITE) result = testdir.runpytest(script, PRINT_STATISTICS_OPTION) out = "\n".join(result.stdout.lines) assert "Hypothesis Statistics" in out assert "max_examples=100" in out assert "< 10% of examples satisfied assumptions" in out @pytest.mark.skipif(LooseVersion(pytest.__version__) < "3.5", reason="too old") def test_prints_statistics_given_option_under_xdist(testdir): script = testdir.makepyfile(TESTSUITE) result = testdir.runpytest(script, PRINT_STATISTICS_OPTION, "-n", "2") out = "\n".join(result.stdout.lines) assert "Hypothesis Statistics" in out assert "max_examples=100" in out assert "< 10% of examples satisfied assumptions" in out UNITTEST_TESTSUITE = """ from hypothesis import given from hypothesis.strategies import integers from unittest import TestCase class TestStuff(TestCase): @given(integers()) def test_all_valid(self, x): pass """ def test_prints_statistics_for_unittest_tests(testdir): script = testdir.makepyfile(UNITTEST_TESTSUITE) result = testdir.runpytest(script, PRINT_STATISTICS_OPTION) out = 
"\n".join(result.stdout.lines) assert "Hypothesis Statistics" in out assert "TestStuff::test_all_valid" in out assert "max_examples=100" in out STATEFUL_TESTSUITE = """ from hypothesis.stateful import RuleBasedStateMachine, rule class Stuff(RuleBasedStateMachine): @rule() def step(self): pass TestStuff = Stuff.TestCase """ def test_prints_statistics_for_stateful_tests(testdir): script = testdir.makepyfile(STATEFUL_TESTSUITE) result = testdir.runpytest(script, PRINT_STATISTICS_OPTION) out = "\n".join(result.stdout.lines) assert "Hypothesis Statistics" in out assert "TestStuff::runTest" in out assert "max_examples=100" in out hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/quality/000077500000000000000000000000001354103617500260765ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/quality/__init__.py000066400000000000000000000012751354103617500302140ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/quality/test_deferred_strategies.py000066400000000000000000000031461354103617500335250ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. 
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesis import strategies as st from tests.common.debug import minimal def test_large_branching_tree(): tree = st.deferred(lambda: st.integers() | st.tuples(tree, tree, tree, tree, tree)) assert minimal(tree) == 0 assert minimal(tree, lambda x: isinstance(x, tuple)) == (0,) * 5 def test_non_trivial_json(): json = st.deferred(lambda: st.none() | st.floats() | st.text() | lists | objects) lists = st.lists(json) objects = st.dictionaries(st.text(), json) assert minimal(json) is None small_list = minimal(json, lambda x: isinstance(x, list) and x) assert small_list == [None] x = minimal(json, lambda x: isinstance(x, dict) and isinstance(x.get(""), list)) assert x == {"": []} def test_self_recursive_lists(): x = st.deferred(lambda: st.lists(x)) assert minimal(x) == [] assert minimal(x, bool) == [[]] assert minimal(x, lambda x: len(x) > 1) == [[], []] hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/quality/test_discovery_ability.py000066400000000000000000000235231354103617500332400ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER # -*- coding: utf-8 -*- """Statistical tests over the forms of the distributions in the standard set of definitions. These tests all take the form of a classic hypothesis test with the null hypothesis being that the probability of some event occurring when drawing data from the distribution produced by some specifier is >= REQUIRED_P """ from __future__ import absolute_import, division, print_function import collections import math import re import hypothesis.internal.reflection as reflection from hypothesis import settings as Settings from hypothesis.errors import UnsatisfiedAssumption from hypothesis.internal.conjecture.engine import ( ConjectureRunner as ConConjectureRunner, ) from hypothesis.strategies import ( booleans, floats, integers, just, lists, one_of, sampled_from, sets, text, tuples, ) from tests.common.utils import no_shrink RUNS = 100 REQUIRED_RUNS = 50 INITIAL_LAMBDA = re.compile(u"^lambda[^:]*:\\s*") def strip_lambda(s): return INITIAL_LAMBDA.sub(u"", s) class HypothesisFalsified(AssertionError): pass def define_test(specifier, predicate, condition=None): def run_test(): if condition is None: def _condition(x): return True condition_string = u"" else: _condition = condition condition_string = strip_lambda( reflection.get_pretty_function_description(condition) ) def test_function(data): try: value = data.draw(specifier) except UnsatisfiedAssumption: data.mark_invalid() if not _condition(value): data.mark_invalid() if predicate(value): data.mark_interesting() successes = 0 for _ in range(RUNS): runner = ConConjectureRunner( test_function, settings=Settings(max_examples=100, phases=no_shrink) ) runner.run() if runner.interesting_examples: successes += 1 if successes >= REQUIRED_RUNS: return event = 
reflection.get_pretty_function_description(predicate) if condition is not None: event += "|" event += condition_string description = (u"P(%s) ~ %d / %d = %.2f < %.2f") % ( event, successes, RUNS, successes / RUNS, (REQUIRED_RUNS / RUNS), ) raise HypothesisFalsified(description + u" rejected") return run_test test_can_produce_zero = define_test(integers(), lambda x: x == 0) test_can_produce_large_magnitude_integers = define_test( integers(), lambda x: abs(x) > 1000 ) test_can_produce_large_positive_integers = define_test(integers(), lambda x: x > 1000) test_can_produce_large_negative_integers = define_test(integers(), lambda x: x < -1000) def long_list(xs): return len(xs) >= 20 test_can_produce_unstripped_strings = define_test(text(), lambda x: x != x.strip()) test_can_produce_stripped_strings = define_test(text(), lambda x: x == x.strip()) test_can_produce_multi_line_strings = define_test(text(), lambda x: u"\n" in x) test_can_produce_ascii_strings = define_test( text(), lambda x: all(ord(c) <= 127 for c in x) ) test_can_produce_long_strings_with_no_ascii = define_test( text(min_size=5), lambda x: all(ord(c) > 127 for c in x) ) test_can_produce_short_strings_with_some_non_ascii = define_test( text(), lambda x: any(ord(c) > 127 for c in x), condition=lambda x: len(x) <= 3 ) test_can_produce_positive_infinity = define_test(floats(), lambda x: x == float(u"inf")) test_can_produce_negative_infinity = define_test( floats(), lambda x: x == float(u"-inf") ) test_can_produce_nan = define_test(floats(), math.isnan) test_can_produce_floats_near_left = define_test(floats(0, 1), lambda t: t < 0.2) test_can_produce_floats_near_right = define_test(floats(0, 1), lambda t: t > 0.8) test_can_produce_floats_in_middle = define_test(floats(0, 1), lambda t: 0.2 <= t <= 0.8) test_can_produce_long_lists = define_test(lists(integers()), long_list) test_can_produce_short_lists = define_test(lists(integers()), lambda x: len(x) <= 10) test_can_produce_the_same_int_twice = define_test( 
lists(integers()), lambda t: len(set(t)) < len(t) ) def distorted_value(x): c = collections.Counter(x) return min(c.values()) * 3 <= max(c.values()) def distorted(x): return distorted_value(map(type, x)) test_sampled_from_large_number_can_mix = define_test( lists(sampled_from(range(50)), min_size=50), lambda x: len(set(x)) >= 25 ) test_sampled_from_often_distorted = define_test( lists(sampled_from(range(5))), distorted_value, condition=lambda x: len(x) >= 3 ) test_non_empty_subset_of_two_is_usually_large = define_test( sets(sampled_from((1, 2))), lambda t: len(t) == 2 ) test_subset_of_ten_is_sometimes_empty = define_test( sets(integers(1, 10)), lambda t: len(t) == 0 ) test_mostly_sensible_floats = define_test(floats(), lambda t: t + 1 > t) test_mostly_largish_floats = define_test( floats(), lambda t: t + 1 > 1, condition=lambda x: x > 0 ) test_ints_can_occasionally_be_really_large = define_test( integers(), lambda t: t >= 2 ** 63 ) test_mixing_is_sometimes_distorted = define_test( lists(booleans() | tuples()), distorted, condition=lambda x: len(set(map(type, x))) == 2, ) test_mixes_2_reasonably_often = define_test( lists(booleans() | tuples()), lambda x: len(set(map(type, x))) > 1, condition=bool ) test_partial_mixes_3_reasonably_often = define_test( lists(booleans() | tuples() | just(u"hi")), lambda x: 1 < len(set(map(type, x))) < 3, condition=bool, ) test_mixes_not_too_often = define_test( lists(booleans() | tuples()), lambda x: len(set(map(type, x))) == 1, condition=bool ) test_integers_are_usually_non_zero = define_test(integers(), lambda x: x != 0) test_integers_are_sometimes_zero = define_test(integers(), lambda x: x == 0) test_integers_are_often_small = define_test(integers(), lambda x: abs(x) <= 100) test_integers_are_often_small_but_not_that_small = define_test( integers(), lambda x: 50 <= abs(x) <= 255 ) # This series of tests checks that the one_of() strategy flattens branches # correctly. 
We assert that the probability of any branch is >= 0.1, # approximately (1/8 = 0.125), regardless of how heavily nested it is in the # strategy. # This first strategy chooses an integer between 0 and 7 (inclusive). one_of_nested_strategy = one_of( just(0), one_of( just(1), just(2), one_of(just(3), just(4), one_of(just(5), just(6), just(7))) ), ) for i in range(8): exec( """test_one_of_flattens_branches_%d = define_test( one_of_nested_strategy, lambda x: x == %d )""" % (i, i) ) xor_nested_strategy = just(0) | ( just(1) | just(2) | (just(3) | just(4) | (just(5) | just(6) | just(7))) ) for i in range(8): exec( """test_xor_flattens_branches_%d = define_test( xor_nested_strategy, lambda x: x == %d )""" % (i, i) ) # This strategy tests interactions with `map()`. They generate integers # from the set {1, 4, 6, 16, 20, 24, 28, 32}. def double(x): return x * 2 one_of_nested_strategy_with_map = one_of( just(1), one_of( (just(2) | just(3)).map(double), one_of( (just(4) | just(5)).map(double), one_of((just(6) | just(7) | just(8)).map(double)), ).map(double), ), ) for i in (1, 4, 6, 16, 20, 24, 28, 32): exec( """test_one_of_flattens_map_branches_%d = define_test( one_of_nested_strategy_with_map, lambda x: x == %d )""" % (i, i) ) # This strategy tests interactions with `flatmap()`. It generates lists # of length 0-7 (inclusive) in which every element is `None`. 
one_of_nested_strategy_with_flatmap = just(None).flatmap( lambda x: one_of( just([x] * 0), just([x] * 1), one_of( just([x] * 2), just([x] * 3), one_of(just([x] * 4), just([x] * 5), one_of(just([x] * 6), just([x] * 7))), ), ) ) for i in range(8): exec( """test_one_of_flattens_flatmap_branches_%d = define_test( one_of_nested_strategy_with_flatmap, lambda x: len(x) == %d )""" % (i, i) ) xor_nested_strategy_with_flatmap = just(None).flatmap( lambda x: ( just([x] * 0) | just([x] * 1) | ( just([x] * 2) | just([x] * 3) | (just([x] * 4) | just([x] * 5) | (just([x] * 6) | just([x] * 7))) ) ) ) for i in range(8): exec( """test_xor_flattens_flatmap_branches_%d = define_test( xor_nested_strategy_with_flatmap, lambda x: len(x) == %d )""" % (i, i) ) # This strategy tests interactions with `filter()`. It generates the even # integers {0, 2, 4, 6} in equal measures. one_of_nested_strategy_with_filter = one_of( just(0), just(1), one_of(just(2), just(3), one_of(just(4), just(5), one_of(just(6), just(7)))), ).filter(lambda x: x % 2 == 0) for i in range(4): exec( """test_one_of_flattens_filter_branches_%d = define_test( one_of_nested_strategy_with_filter, lambda x: x == 2 * %d )""" % (i, i) ) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/quality/test_float_shrinking.py000066400000000000000000000037721354103617500327010ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesis.strategies as st from hypothesis import HealthCheck, Verbosity, assume, example, given, settings from hypothesis.internal.compat import ceil from tests.common.debug import minimal def test_shrinks_to_simple_floats(): assert minimal(st.floats(), lambda x: x > 1) == 2.0 assert minimal(st.floats(), lambda x: x > 0) == 1.0 @pytest.mark.parametrize("n", [1, 2, 3, 8, 10]) def test_can_shrink_in_variable_sized_context(n): x = minimal(st.lists(st.floats(), min_size=n), any) assert len(x) == n assert x.count(0.0) == n - 1 assert 1 in x @example(1.7976931348623157e308) @example(1.5) @given(st.floats(min_value=0, allow_infinity=False, allow_nan=False)) @settings(deadline=None, suppress_health_check=HealthCheck.all()) def test_shrinks_downwards_to_integers(f): g = minimal(st.floats(), lambda x: x >= f, settings(verbosity=Verbosity.quiet)) assert g == ceil(f) @example(1) @given(st.integers(1, 2 ** 16 - 1)) @settings(deadline=None, suppress_health_check=HealthCheck.all(), max_examples=10) def test_shrinks_downwards_to_integers_when_fractional(b): g = minimal( st.floats(), lambda x: assume((0 < x < (2 ** 53)) and int(x) != x) and x >= b, settings=settings(verbosity=Verbosity.quiet, max_examples=10 ** 6), ) assert g == b + 0.5 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/quality/test_integers.py000066400000000000000000000074261354103617500313400ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. 
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from random import Random import hypothesis.strategies as st from hypothesis import ( HealthCheck, Phase, Verbosity, assume, example, given, reject, settings, ) from hypothesis.internal.compat import hbytes from hypothesis.internal.conjecture.data import ConjectureData, Status, StopTest from hypothesis.internal.conjecture.engine import ConjectureRunner from hypothesis.searchstrategy.numbers import WideRangeIntStrategy @st.composite def problems(draw): while True: buf = bytearray(draw(st.binary(min_size=16, max_size=16))) while buf and not buf[-1]: buf.pop() try: d = ConjectureData.for_buffer(buf) k = d.draw(st.integers()) stop = d.draw_bits(8) if stop > 0 and k > 0: return (draw(st.integers(0, k - 1)), hbytes(d.buffer)) except (StopTest, IndexError): pass @example((2, b"\x00\x00\n\x01")) @example((1, b"\x00\x00\x06\x01")) @example(problem=(32768, b"\x03\x01\x00\x00\x00\x00\x00\x01\x00\x02\x01")) @settings( suppress_health_check=HealthCheck.all(), deadline=None, max_examples=10, verbosity=Verbosity.normal, ) @given(problems()) def test_always_reduces_integers_to_smallest_suitable_sizes(problem): n, blob = problem blob = hbytes(blob) try: d = ConjectureData.for_buffer(blob) k = d.draw(st.integers()) stop = blob[len(d.buffer)] except (StopTest, IndexError): reject() assume(k > n) assume(stop > 0) def f(data): k = data.draw(st.integers()) data.output = repr(k) if data.draw_bits(8) == stop and k >= n: data.mark_interesting() runner = ConjectureRunner( f, random=Random(0), settings=settings( suppress_health_check=HealthCheck.all(), phases=(Phase.shrink,), database=None, verbosity=Verbosity.debug, ), database_key=None, ) runner.cached_test_function(blob) assert runner.interesting_examples v, = 
runner.interesting_examples.values() shrinker = runner.new_shrinker(v, lambda x: x.status == Status.INTERESTING) shrinker.fixate_shrink_passes(["minimize_individual_blocks"]) v = shrinker.shrink_target m = ConjectureData.for_buffer(v.buffer).draw(st.integers()) assert m == n # Upper bound on the length needed is calculated as follows: # * We have an initial byte at the beginning to decide the length of the # integer. # * We have a terminal byte as the stop value. # * The rest is the integer payload. This should be n. Including the sign # bit, n needs (1 + n.bit_length()) / 8 bytes (rounded up). But we only # have power of two sizes, so it may be up to a factor of two more than # that. bits_needed = 1 + n.bit_length() actual_bits_needed = min( [s for s in WideRangeIntStrategy.sizes if s >= bits_needed] ) bytes_needed = actual_bits_needed // 8 # 3 extra bytes: two for the sampler, one for the capping value. assert len(v.buffer) == 3 + bytes_needed hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/quality/test_poisoned_lists.py000066400000000000000000000063571354103617500325600ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from random import Random import pytest import hypothesis.internal.conjecture.utils as cu import hypothesis.strategies as st from hypothesis import settings from hypothesis.internal.compat import ceil, hrange from hypothesis.internal.conjecture.engine import ( ConjectureData, ConjectureRunner, uniform, ) from hypothesis.searchstrategy import SearchStrategy POISON = "POISON" class Poisoned(SearchStrategy): def __init__(self, poison_chance): SearchStrategy.__init__(self) self.__poison_chance = poison_chance self.__ints = st.integers(0, 10) def do_draw(self, data): if cu.biased_coin(data, self.__poison_chance): return POISON else: return data.draw(self.__ints) class LinearLists(SearchStrategy): def __init__(self, elements, size): SearchStrategy.__init__(self) self.__length = st.integers(0, size) self.__elements = elements def do_draw(self, data): return [data.draw(self.__elements) for _ in hrange(data.draw(self.__length))] class Matrices(SearchStrategy): def __init__(self, elements, size): SearchStrategy.__init__(self) self.__length = st.integers(0, ceil(size ** 0.5)) self.__elements = elements def do_draw(self, data): n = data.draw(self.__length) m = data.draw(self.__length) return [data.draw(self.__elements) for _ in hrange(n * m)] class TrialRunner(ConjectureRunner): def generate_new_examples(self): def draw_bytes(data, n): return uniform(self.random, n) while not self.interesting_examples: self.test_function(self.new_conjecture_data(draw_bytes)) LOTS = 10 ** 6 TRIAL_SETTINGS = settings(max_examples=LOTS, database=None) @pytest.mark.parametrize( "seed", [2282791295271755424, 1284235381287210546, 14202812238092722246, 26097] ) @pytest.mark.parametrize("size", [5, 10, 20]) @pytest.mark.parametrize("p", [0.01, 0.1]) @pytest.mark.parametrize("strategy_class", [LinearLists, Matrices]) def test_minimal_poisoned_containers(seed, size, p, strategy_class, monkeypatch): elements = Poisoned(p) 
strategy = strategy_class(elements, size) def test_function(data): v = data.draw(strategy) data.output = repr(v) if POISON in v: data.mark_interesting() runner = TrialRunner(test_function, random=Random(seed), settings=TRIAL_SETTINGS) runner.run() v, = runner.interesting_examples.values() result = ConjectureData.for_buffer(v.buffer).draw(strategy) assert len(result) == 1 hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/quality/test_poisoned_trees.py000066400000000000000000000106701354103617500325350ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from random import Random import pytest import hypothesis.internal.conjecture.utils as cu from hypothesis import HealthCheck, settings from hypothesis.internal.compat import hbytes, hrange from hypothesis.internal.conjecture.engine import ( ConjectureData, ConjectureRunner, uniform, ) from hypothesis.searchstrategy import SearchStrategy POISON = "POISON" MAX_INT = 2 ** 32 - 1 class PoisonedTree(SearchStrategy): """Generates variable sized tuples with an implicit tree structure. The actual result is flattened out, but the hierarchy is implicit in the data. 
""" def __init__(self, p): SearchStrategy.__init__(self) self.__p = p def do_draw(self, data): if cu.biased_coin(data, self.__p): return data.draw(self) + data.draw(self) else: # We draw n as two separate calls so that it doesn't show up as a # single block. If it did, the heuristics that allow us to move # blocks around would fire and it would move right, which would # then allow us to shrink it more easily. n = (data.draw_bits(16) << 16) | data.draw_bits(16) if n == MAX_INT: return (POISON,) else: return (None,) LOTS = 10 ** 6 TEST_SETTINGS = settings( database=None, suppress_health_check=HealthCheck.all(), max_examples=LOTS, deadline=None, ) @pytest.mark.parametrize("size", [2, 5, 10]) @pytest.mark.parametrize("seed", [0, 15993493061449915028]) def test_can_reduce_poison_from_any_subtree(size, seed): """This test validates that we can minimize to any leaf node of a binary tree, regardless of where in the tree the leaf is.""" random = Random(seed) # Initially we create the minimal tree of size n, regardless of whether it # is poisoned (which it won't be - the poison event essentially never # happens when drawing uniformly at random). # Choose p so that the expected size of the tree is equal to the desired # size. p = 1.0 / (2.0 - 1.0 / size) strat = PoisonedTree(p) def test_function(data): v = data.draw(strat) if len(v) >= size: data.mark_interesting() runner = ConjectureRunner(test_function, random=random, settings=TEST_SETTINGS) while not runner.interesting_examples: runner.test_function( runner.new_conjecture_data(lambda data, n: uniform(random, n)) ) runner.shrink_interesting_examples() data, = runner.interesting_examples.values() assert len(ConjectureData.for_buffer(data.buffer).draw(strat)) == size starts = [b.start for b in data.blocks if b.length == 2] assert len(starts) % 2 == 0 for i in hrange(0, len(starts), 2): # Now for each leaf position in the tree we try inserting a poison # value artificially. 
Additionally, we add a marker to the end that # must be preserved. The marker means that we are not allow to rely on # discarding the end of the buffer to get the desired shrink. u = starts[i] marker = hbytes([1, 2, 3, 4]) def test_function_with_poison(data): v = data.draw(strat) m = data.draw_bytes(len(marker)) if POISON in v and m == marker: data.mark_interesting() runner = ConjectureRunner( test_function_with_poison, random=random, settings=TEST_SETTINGS ) runner.cached_test_function( data.buffer[:u] + hbytes([255]) * 4 + data.buffer[u + 4 :] + marker ) assert runner.interesting_examples runner.shrink_interesting_examples() shrunk, = runner.interesting_examples.values() assert ConjectureData.for_buffer(shrunk.buffer).draw(strat) == (POISON,) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/quality/test_shrink_quality.py000066400000000000000000000216231354103617500325610ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from collections import namedtuple from fractions import Fraction from functools import reduce import pytest from hypothesis import assume, settings from hypothesis.internal.compat import OrderedDict, hrange from hypothesis.strategies import ( booleans, builds, dictionaries, fixed_dictionaries, fractions, frozensets, integers, just, lists, none, sampled_from, sets, text, tuples, ) from tests.common.debug import minimal from tests.common.utils import flaky def test_integers_from_minimizes_leftwards(): assert minimal(integers(min_value=101)) == 101 def test_minimal_fractions_1(): assert minimal(fractions()) == Fraction(0) def test_minimal_fractions_2(): assert minimal(fractions(), lambda x: x >= 1) == Fraction(1) def test_minimal_fractions_3(): assert minimal(lists(fractions()), lambda s: len(s) >= 5) == [Fraction(0)] * 5 def test_minimize_string_to_empty(): assert minimal(text()) == u"" def test_minimize_one_of(): for _ in hrange(100): assert minimal(integers() | text() | booleans()) in (0, u"", False) def test_minimize_mixed_list(): mixed = minimal(lists(integers() | text()), lambda x: len(x) >= 10) assert set(mixed).issubset({0, u""}) def test_minimize_longer_string(): assert minimal(text(), lambda x: len(x) >= 10) == u"0" * 10 def test_minimize_longer_list_of_strings(): assert minimal(lists(text()), lambda x: len(x) >= 10) == [u""] * 10 def test_minimize_3_set(): assert minimal(sets(integers()), lambda x: len(x) >= 3) in ({0, 1, 2}, {-1, 0, 1}) def test_minimize_3_set_of_tuples(): assert minimal(sets(tuples(integers())), lambda x: len(x) >= 2) == {(0,), (1,)} def test_minimize_sets_of_sets(): elements = integers(1, 100) size = 8 set_of_sets = minimal(sets(frozensets(elements), min_size=size)) assert frozenset() in set_of_sets assert len(set_of_sets) == size for s in set_of_sets: if len(s) > 1: assert any(s != t and t.issubset(s) for t in set_of_sets) def 
test_can_simplify_flatmap_with_bounded_left_hand_size(): assert ( minimal(booleans().flatmap(lambda x: lists(just(x))), lambda x: len(x) >= 10) == [False] * 10 ) def test_can_simplify_across_flatmap_of_just(): assert minimal(integers().flatmap(just)) == 0 def test_can_simplify_on_right_hand_strategy_of_flatmap(): assert minimal(integers().flatmap(lambda x: lists(just(x)))) == [] @flaky(min_passes=5, max_runs=5) def test_can_ignore_left_hand_side_of_flatmap(): assert ( minimal(integers().flatmap(lambda x: lists(integers())), lambda x: len(x) >= 10) == [0] * 10 ) def test_can_simplify_on_both_sides_of_flatmap(): assert ( minimal(integers().flatmap(lambda x: lists(just(x))), lambda x: len(x) >= 10) == [0] * 10 ) def test_flatmap_rectangles(): lengths = integers(min_value=0, max_value=10) def lists_of_length(n): return lists(sampled_from("ab"), min_size=n, max_size=n) xs = minimal( lengths.flatmap(lambda w: lists(lists_of_length(w))), lambda x: ["a", "b"] in x, settings=settings(database=None, max_examples=2000), ) assert xs == [["a", "b"]] @flaky(min_passes=5, max_runs=5) @pytest.mark.parametrize("dict_class", [dict, OrderedDict]) def test_dictionary(dict_class): assert ( minimal(dictionaries(keys=integers(), values=text(), dict_class=dict_class)) == dict_class() ) x = minimal( dictionaries(keys=integers(), values=text(), dict_class=dict_class), lambda t: len(t) >= 3, ) assert isinstance(x, dict_class) assert set(x.values()) == {u""} for k in x: if k < 0: assert k + 1 in x if k > 0: assert k - 1 in x def test_minimize_single_element_in_silly_large_int_range(): ir = integers(-(2 ** 256), 2 ** 256) assert minimal(ir, lambda x: x >= -(2 ** 255)) == 0 def test_minimize_multiple_elements_in_silly_large_int_range(): desired_result = [0] * 20 ir = integers(-(2 ** 256), 2 ** 256) x = minimal(lists(ir), lambda x: len(x) >= 20, timeout_after=20) assert x == desired_result def test_minimize_multiple_elements_in_silly_large_int_range_min_is_not_dupe(): ir = integers(0, 2 ** 256) 
target = list(range(20)) x = minimal( lists(ir), lambda x: (assume(len(x) >= 20) and all(x[i] >= target[i] for i in target)), timeout_after=60, ) assert x == target def test_find_large_union_list(): size = 10 def large_mostly_non_overlapping(xs): union = reduce(set.union, xs) return len(union) >= size result = minimal( lists(sets(integers(), min_size=1), min_size=1), large_mostly_non_overlapping, timeout_after=120, ) assert len(result) == 1 union = reduce(set.union, result) assert len(union) == size assert max(union) == min(union) + len(union) - 1 @pytest.mark.parametrize("n", [0, 1, 10, 100, 1000]) @pytest.mark.parametrize( "seed", [13878544811291720918, 15832355027548327468, 12901656430307478246] ) def test_containment(n, seed): iv = minimal( tuples(lists(integers()), integers()), lambda x: x[1] in x[0] and x[1] >= n, timeout_after=60, ) assert iv == ([n], n) def test_duplicate_containment(): ls, i = minimal( tuples(lists(integers()), integers()), lambda s: s[0].count(s[1]) > 1, timeout_after=100, ) assert ls == [0, 0] assert i == 0 @pytest.mark.parametrize("seed", [11, 28, 37]) def test_reordering_bytes(seed): ls = minimal(lists(integers()), lambda x: sum(x) >= 10 and len(x) >= 3) assert ls == sorted(ls) def test_minimize_long_list(): assert ( minimal(lists(booleans(), min_size=50), lambda x: len(x) >= 70) == [False] * 70 ) def test_minimize_list_of_longish_lists(): size = 5 xs = minimal( lists(lists(booleans())), lambda x: len([t for t in x if any(t) and len(t) >= 2]) >= size, ) assert len(xs) == size for x in xs: assert x == [False, True] def test_minimize_list_of_fairly_non_unique_ints(): xs = minimal(lists(integers()), lambda x: len(set(x)) < len(x)) assert len(xs) == 2 def test_list_with_complex_sorting_structure(): xs = minimal( lists(lists(booleans())), lambda x: [list(reversed(t)) for t in x] > x and len(x) > 3, ) assert len(xs) == 4 def test_list_with_wide_gap(): xs = minimal(lists(integers()), lambda x: x and (max(x) > min(x) + 10 > 0)) assert len(xs) 
== 2 xs.sort() assert xs[1] == 11 + xs[0] def test_minimize_namedtuple(): T = namedtuple(u"T", (u"a", u"b")) tab = minimal(builds(T, integers(), integers()), lambda x: x.a < x.b) assert tab.b == tab.a + 1 def test_minimize_dict(): tab = minimal( fixed_dictionaries({u"a": booleans(), u"b": booleans()}), lambda x: x[u"a"] or x[u"b"], ) assert not (tab[u"a"] and tab[u"b"]) def test_minimize_list_of_sets(): assert minimal( lists(sets(booleans())), lambda x: len(list(filter(None, x))) >= 3 ) == ([{False}] * 3) def test_minimize_list_of_lists(): assert minimal( lists(lists(integers())), lambda x: len(list(filter(None, x))) >= 3 ) == ([[0]] * 3) def test_minimize_list_of_tuples(): xs = minimal(lists(tuples(integers(), integers())), lambda x: len(x) >= 2) assert xs == [(0, 0), (0, 0)] def test_minimize_multi_key_dicts(): assert minimal(dictionaries(keys=booleans(), values=booleans()), bool) == { False: False } def test_multiple_empty_lists_are_independent(): x = minimal(lists(lists(none(), max_size=0)), lambda t: len(t) >= 2) u, v = x assert u is not v def test_can_find_sets_unique_by_incomplete_data(): size = 5 ls = minimal( lists(tuples(integers(), integers()), unique_by=max), lambda x: len(x) >= size ) assert len(ls) == size values = sorted(list(map(max, ls))) assert values[-1] - values[0] == size - 1 for u, v in ls: assert u <= 0 @pytest.mark.parametrize(u"n", range(10)) def test_lists_forced_near_top(n): assert minimal( lists(integers(), min_size=n, max_size=n + 2), lambda t: len(t) == n + 2 ) == [0] * (n + 2) hypothesis-hypothesis-python-4.36.2/hypothesis-python/tests/quality/test_zig_zagging.py000066400000000000000000000065031354103617500320120ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import random from math import log import hypothesis.strategies as st from hypothesis import HealthCheck, Phase, Verbosity, assume, example, given, settings from hypothesis.internal.compat import ceil, hbytes, int_from_bytes from hypothesis.internal.conjecture.data import ConjectureData from hypothesis.internal.conjecture.engine import ConjectureRunner @st.composite def problem(draw): b = hbytes(draw(st.binary(min_size=1, max_size=8))) m = int_from_bytes(b) * 256 assume(m > 0) marker = draw(st.binary(max_size=8)) bound = draw(st.integers(0, m - 1)) return (b, marker, bound) base_settings = settings( database=None, deadline=None, suppress_health_check=HealthCheck.all(), max_examples=10, verbosity=Verbosity.normal, phases=(Phase.explicit, Phase.generate), ) @example((b"\x10\x00\x00\x00\x00\x00", b"", 2861143707951135)) @example((b"\x05Cn", b"%\x1b\xa0\xfa", 12394667)) @example((b"\x179 f", b"\xf5|", 24300326997)) @example((b"\x05*\xf5\xe5\nh", b"", 1076887621690235)) @example((b"=", b"", 2508)) @example((b"\x01\x00", b"", 20048)) @example((b"\x01", b"", 0)) @example((b"\x02", b"", 258)) @example((b"\x08", b"", 1792)) @example((b"\x0c", b"", 0)) @example((b"\x01", b"", 1)) @settings( base_settings, verbosity=Verbosity.normal, phases=( # We disable shrinking for this test because when it fails it's a sign # that the shrinker is working really badly, so it ends up very slow! 
Phase.explicit, Phase.generate, ), max_examples=20, ) @given(problem()) def test_avoids_zig_zag_trap(p): b, marker, lower_bound = p random.seed(0) b = hbytes(b) marker = hbytes(marker) n_bits = 8 * (len(b) + 1) def test_function(data): m = data.draw_bits(n_bits) if m < lower_bound: data.mark_invalid() n = data.draw_bits(n_bits) if data.draw_bytes(len(marker)) != marker: data.mark_invalid() if abs(m - n) == 1: data.mark_interesting() runner = ConjectureRunner( test_function, database_key=None, settings=settings(base_settings, phases=(Phase.generate, Phase.shrink)), ) runner.cached_test_function(b + hbytes([0]) + b + hbytes([1]) + marker) assert runner.interesting_examples runner.run() v, = runner.interesting_examples.values() data = ConjectureData.for_buffer(v.buffer) m = data.draw_bits(n_bits) n = data.draw_bits(n_bits) assert m == lower_bound if m == 0: assert n == 1 else: assert n == m - 1 budget = 2 * n_bits * ceil(log(n_bits, 2)) + 2 assert runner.shrinks <= budget hypothesis-hypothesis-python-4.36.2/hypothesis-python/tox.ini000066400000000000000000000076721354103617500245730ustar00rootroot00000000000000[tox] envlist = py{35,36,37}-{brief,prettyquick,full,custom} toxworkdir={env:TOX_WORK_DIR:.tox} [testenv] deps = -r../requirements/test.txt whitelist_externals= bash passenv= HOME LC_ALL COVERAGE_FILE TOXENV setenv= brief: HYPOTHESIS_PROFILE=speedy commands = full: bash scripts/basic-test.sh brief: python -m pytest tests/cover/test_testdecorators.py {posargs} prettyquick: python -m pytest tests/cover/ custom: python -m pytest {posargs} [testenv:py27-full] deps = -r../requirements/py2.txt whitelist_externals= bash commands = bash scripts/basic-test.sh [testenv:pypy-full] deps = -r../requirements/py2.txt whitelist_externals= bash commands = bash scripts/basic-test.sh [testenv:quality] deps= -r../requirements/test.txt commands= python -m pytest tests/quality/ -n2 [testenv:quality2] basepython=python2.7 deps= -r../requirements/py2.txt commands= python -m pytest 
tests/quality/ [testenv:py27typing] basepython=python2.7 deps= -r../requirements/py2.txt -r../requirements/typing.txt commands= python -m pytest tests/cover/ -n2 [testenv:unicode] basepython=python2.7 deps = unicode-nazi setenv= UNICODENAZI=true PYTHONPATH=. commands= python scripts/unicodechecker.py [testenv:pandas19] deps = -r../requirements/test.txt pandas~=0.19.2 commands = python -m pytest tests/pandas -n2 [testenv:pandas20] deps = -r../requirements/test.txt pandas~=0.20.3 commands = python -m pytest tests/pandas -n2 [testenv:pandas21] deps = -r../requirements/test.txt pandas~=0.21.0 commands = python -m pytest tests/pandas -n2 [testenv:pandas22] deps = -r../requirements/test.txt pandas~=0.22.0 commands = python -m pytest tests/pandas -n2 [testenv:pandas23] deps = -r../requirements/test.txt pandas~=0.23.0 commands = python -m pytest tests/pandas -n2 [testenv:pandas24] deps = -r../requirements/test.txt pandas~=0.24.0 commands = python -m pytest tests/pandas -n2 [testenv:pandas25] deps = -r../requirements/test.txt pandas~=0.25.0 commands = python -m pytest tests/pandas -n2 [testenv:django111] commands = pip install .[pytz] pip install django~=1.11.7 python -m tests.django.manage test tests.django [testenv:django20] commands = pip install .[pytz] pip install django~=2.0.1 python -m tests.django.manage test tests.django [testenv:django21] commands = pip install .[pytz] pip install django~=2.1.0 python -m tests.django.manage test tests.django [testenv:django22] commands = pip install .[pytz] pip install django~=2.2.0 python -m tests.django.manage test tests.django [testenv:nose] deps = nose commands= nosetests tests/cover/test_testdecorators.py [testenv:pytest30] deps = -r../requirements/test.txt commands= pip install pytest==3.0 pytest-xdist==1.24 pytest-forked==0.2 python -m pytest tests/pytest tests/cover/test_testdecorators.py [testenv:coverage] deps = -r../requirements/test.txt -r../requirements/coverage.txt whitelist_externals= rm setenv= 
HYPOTHESIS_INTERNAL_COVERAGE=true commands = rm -f branch-check python -m coverage --version python -m coverage debug sys python -m coverage run --rcfile=.coveragerc -m pytest -n0 --strict tests/cover tests/datetime tests/py3 tests/numpy tests/pandas tests/lark --ff {posargs} python -m coverage report -m --fail-under=100 --show-missing --skip-covered python scripts/validate_branch_check.py [testenv:pypy-with-tracer] setenv= HYPOTHESIS_PROFILE=with_coverage basepython=pypy deps = -r../requirements/py2.txt commands = python -m pytest tests/cover/test_testdecorators.py tests/nocover/test_coverage.py -n 0 {posargs} [testenv:examples3] deps= -r../requirements/test.txt commands= python -m pytest examples [testenv:examples2] basepython=python2.7 deps= -r../requirements/py2.txt commands= python -m pytest examples hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/000077500000000000000000000000001354103617500227045ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/.gitignore000066400000000000000000000001631354103617500246740ustar00rootroot00000000000000*.sw* spec/examples.txt target/ **/*.rs.bk Cargo.lock *.so coverage .yardoc isolated doc *.gem secrets secrets.tar hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/.rspec000066400000000000000000000000361354103617500240200ustar00rootroot00000000000000--require spec_helper --color hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/.rubocop.yml000066400000000000000000000011471354103617500251610ustar00rootroot00000000000000Metrics/BlockLength: ExcludedMethods: ['describe', 'context'] Documentation: Enabled: false Metrics/MethodLength: Enabled: false Metrics/CyclomaticComplexity: Enabled: false Metrics/PerceivedComplexity: Enabled: false Metrics/AbcSize: Enabled: false Lint/RescueException: Enabled: false Style/MixinUsage: Enabled: false Style/MultilineBlockChain: Enabled: false Style/MethodMissing: Enabled: false Style/MultilineBlockChain: Enabled: false Metrics/ModuleLength: Enabled: false 
Metrics/BlockLength: Enabled: false Lint/HandleExceptions: Enabled: false Style/GuardClause: Enabled: false hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/.ruby-version000066400000000000000000000000061354103617500253450ustar00rootroot000000000000002.4.2 hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/CHANGELOG.md000066400000000000000000000063031354103617500245170ustar00rootroot00000000000000# Hypothesis for Ruby 0.2.0 (2018-10-24) This release adds an example database to Hypothesis for Ruby. This means that when a test fails, it will automatically reuse the previously shown example when you rerun it, without having to manually pass a seed. # Hypothesis for Ruby 0.1.2 (2018-09-24) This release makes the code useable via a direct require. I.e. no need for rubygems or any special LOAD_PATH. For example, if the base directory were in /opt, you'd just say: require "/opt/hypothesis/hypothesis-ruby/lib/hypothesis" # Hypothesis for Ruby 0.1.1 (2018-08-31) This release fixes minor documentation issues. Thanks to Tessa Bradbury for this contribution. # Hypothesis for Ruby 0.1.0 (2018-07-16) This release adds support for reporting multiple exceptions when Hypothesis finds more than one way for the test to fail. # Hypothesis for Ruby 0.0.15 (2018-06-25) This release fixes an occasional `RuntimeError` that could occur when shrinking a failing test. # Hypothesis for Ruby 0.0.14 (2018-06-25) This release updates the release date to the correct date, as part of fixing a bug which caused the last couple of releases (0.0.11, 0.0.12, and 0.0.13) to have an incorrect date. # Hypothesis for Ruby 0.0.13 (2018-06-25) This release moves the core Rust engine into the separate Conjecture crate. It should have no user visible effect. # Hypothesis for Ruby 0.0.12 (2018-06-23) This release is the beginning of splitting out the Rust core of Hypothesis Ruby into a separate `conjecture` crate for the non-Ruby-specific components of it. It should have no user visible impact. 
# Hypothesis for Ruby 0.0.11 (2018-06-22) This release has no user-visible changes other than updating the gemspec's homepage attribute. ## Hypothesis for Ruby 0.0.10 (2018-04-26) This release is another update to shrinking: * Cases where the value may be simplified without necessarily becoming smaller will have better results. * Duplicated values can now sometimes be simultaneously shrunk. ## Hypothesis for Ruby 0.0.9 (2018-04-20) This improves Hypothesis for Ruby's shrinking to be much closer to Hypothesis for Python's. It's still far from complete, and even in cases where it has the same level of quality it will often be significantly slower, but examples should now be much more consistent, especially in cases where you are using e.g. `built_as`. ## Hypothesis for Ruby 0.0.8 (2018-02-20) This release fixes the dependency on Rake to be in a more sensible range. ## Hypothesis for Ruby 0.0.7 (2018-02-19) This release updates an error in the README. ## Hypothesis for Ruby 0.0.6 (2018-02-19) This release just updates the gem description. ## Hypothesis for Ruby 0.0.5 (2018-02-19) This is a trivial release to test the release automation. It should have no user visible impact. ## Hypothesis for Ruby 0.0.3 (2018-02-19) This is an initial developer preview of Hypothesis for Ruby. It's ready to use, but isn't yet stable and has significant limitations. It is mostly released so that people can easily give feedback on the API and implementation, and is likely to change substantially before a stable release. Note that while there were some earlier release numbers internally, these were pulled. This is the first official release. hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/Cargo.toml000066400000000000000000000003431354103617500246340ustar00rootroot00000000000000[package] name = "hypothesis-ruby" version = "0.1.0" authors = ["David R. 
MacIver "] [lib] crate-type = ["cdylib"] [dependencies] helix = '0.7.5' rand = '0.3' conjecture = { path = '../conjecture-rust' } hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/Gemfile000066400000000000000000000003441354103617500242000ustar00rootroot00000000000000# frozen_string_literal: true source 'https://rubygems.org' gemspec gem 'minitest', '~> 5.8.4' gem 'rspec', '~> 3.0' gem 'rubocop', '~> 0.51.0' gem 'simplecov', '~> 0.15.1' gem 'yard', '~> 0.9.12' gem 'redcarpet', '~> 3.4.0' hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/Gemfile.lock000066400000000000000000000030221354103617500251230ustar00rootroot00000000000000PATH remote: . specs: hypothesis-specs (0.1.2) helix_runtime (~> 0.7.0) rake (>= 10.0, < 13.0) GEM remote: https://rubygems.org/ specs: ast (2.4.0) diff-lcs (1.3) docile (1.1.5) helix_runtime (0.7.5) rake (>= 10.0) thor (>= 0.19.4, < 2.0) tomlrb (~> 1.2.4) json (2.1.0) minitest (5.8.5) parallel (1.12.1) parser (2.5.1.0) ast (~> 2.4.0) powerpack (0.1.1) rainbow (2.2.2) rake rake (12.3.1) redcarpet (3.4.0) rspec (3.7.0) rspec-core (~> 3.7.0) rspec-expectations (~> 3.7.0) rspec-mocks (~> 3.7.0) rspec-core (3.7.1) rspec-support (~> 3.7.0) rspec-expectations (3.7.0) diff-lcs (>= 1.2.0, < 2.0) rspec-support (~> 3.7.0) rspec-mocks (3.7.0) diff-lcs (>= 1.2.0, < 2.0) rspec-support (~> 3.7.0) rspec-support (3.7.1) rubocop (0.51.0) parallel (~> 1.10) parser (>= 2.3.3.1, < 3.0) powerpack (~> 0.1) rainbow (>= 2.2.2, < 3.0) ruby-progressbar (~> 1.7) unicode-display_width (~> 1.0, >= 1.0.1) ruby-progressbar (1.9.0) simplecov (0.15.1) docile (~> 1.1.0) json (>= 1.8, < 3) simplecov-html (~> 0.10.0) simplecov-html (0.10.2) thor (0.20.0) tomlrb (1.2.7) unicode-display_width (1.3.2) yard (0.9.12) PLATFORMS ruby DEPENDENCIES hypothesis-specs! 
minitest (~> 5.8.4) redcarpet (~> 3.4.0) rspec (~> 3.0) rubocop (~> 0.51.0) simplecov (~> 0.15.1) yard (~> 0.9.12) BUNDLED WITH 1.16.5 hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/LICENSE.txt000066400000000000000000000006411354103617500245300ustar00rootroot00000000000000Copyright (c) 2018, David R. MacIver All code in this repository except where explicitly noted otherwise is released under the Mozilla Public License v 2.0. You can obtain a copy at https://mozilla.org/MPL/2.0/. Some code in this repository may come from other projects. Where applicable, the original copyright and license are noted and any modifications made are released dual licensed with the original license. hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/README.markdown000066400000000000000000000054731354103617500254160ustar00rootroot00000000000000# Hypothesis for Ruby Hypothesis is a powerful, flexible, and easy to use library for *property-based testing*. In property-based testing, in contrast to traditional *example-based testing*, a test is written not against a single example but as a statement that should hold for any of a range of possible values. ## Usage In Hypothesis for Ruby, a test looks something like this: ```ruby require "hypothesis" RSpec.configure do |config| config.include(Hypothesis) config.include(Hypothesis::Possibilities) end RSpec.describe "removing an element from a list" do it "results in the element no longer being in the list" do hypothesis do # Or lists(of: integers, min_size: 1), but this lets us # demonstrate assume. values = any array(of: integers) # If this is not true then the test will stop here. assume values.size > 0 to_remove = any element_of(values) values.delete_at(values.index(to_remove)) # Will fail if the value was duplicated in the list. 
expect(values.include?(to_remove)).to be false end end end ``` This would then fail with: ``` 1) removing an element from a list results in the element no longer being in the list Failure/Error: expect(values.include?(to_remove)).to be false Given #1: [0, 0] Given #2: 0 expected false got true ``` The use of RSpec here is incidental: Hypothesis for Ruby works just as well with minitest, and should work with anything else you care to use. ## Getting Started Hypothesis is available on rubygems.org as a developer preview. If you want to try it today you can use the current development branch by adding the following to your Gemfile: ```ruby gem 'hypothesis-specs' ``` The API is still in flux, so be warned that you should expect it to break on upgrades! Right now this is really more to allow you to try it out and provide feedback than something you should expect to rely on. The more feedback we get, the sooner it will get there! Note that in order to use Hypothesis for Ruby, you will need a rust toolchain installed. Please go to [https://www.rustup.rs](https://www.rustup.rs) and follow the instructions if you do not already have one. ## Project Status Hypothesis for Ruby is currently in an *early alpha* stage. It works, and has a solid core set of features, but you should expect to find rough edges, it is far from feature complete, and the API makes no promises of backwards compatibility. Right now you should consider it to be more in the spirit of a developer preview. You can and should try it out, and hopefully you will find all sorts of interesting bugs in your code by doing so! But you'll probably find interesting bugs in Hypothesis too, and we'd appreciate you reporting them, as well as any just general usability issues or points of confusion you have. 
hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/Rakefile000066400000000000000000000033371354103617500243570ustar00rootroot00000000000000# frozen_string_literal: true require 'rubygems' require 'helix_runtime/build_task' require 'date' require 'open3' begin require 'rspec/core/rake_task' RSpec::Core::RakeTask.new(:spec) require 'rake/testtask' Rake::TestTask.new(minitests: :build) do |t| t.test_files = FileList['minitests/**/test_*.rb'] t.verbose = true end task test: %i[build spec minitests] rescue LoadError end HelixRuntime::BuildTask.new def rubocop(fix:) sh "bundle exec rubocop #{'-a' if fix} lib spec minitests " \ 'Rakefile hypothesis-specs.gemspec' end task :checkformat do rubocop(fix: false) end task :format do rubocop(fix: true) end begin require 'yard' YARD::Rake::YardocTask.new(:runyard) do |t| t.files = [ 'lib/hypothesis.rb', 'lib/hypothesis/errors.rb', 'lib/hypothesis/possible.rb' ] t.options = ['--markup=markdown', '--no-private'] end task doc: :runyard do YARD::Registry.load objs = YARD::Registry.select do |o| is_private = false t = o until t.root? if t.visibility != :public is_private = true break end t = t.parent end !is_private && o.docstring.blank? end objs.sort_by! { |o| o.name.to_s } unless objs.empty? 
abort "Undocumented objects: #{objs.map(&:name).join(', ')}" end end rescue LoadError end GEMSPEC = 'hypothesis-specs.gemspec' RELEASE_FILE = 'RELEASE.md' CHANGELOG = 'CHANGELOG.md' def run_for_output(*args) out, result = Open3.capture2(*args) abort if result.exitstatus != 0 out.strip end task :clean do sh 'git clean -fdx lib' sh 'rm -rf hypothesis-specs*.gem' sh 'rm -rf ../target' end task gem: :clean do sh 'gem build hypothesis-specs.gemspec' end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/docs/000077500000000000000000000000001354103617500236345ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/docs/debt.md000066400000000000000000000104261354103617500250770ustar00rootroot00000000000000# A Series of Unfortunate Implementation Choices ## In Which The Narrator Seeks To Justify Himself This project is currently in a somewhat expeditionary state, where its goal is not to produce wonderful software that will stand the test of time, but instead to prove its concept valid and get something working enough for me to decide whether it's worth it to continue down this route, and to decide whether it's worth it to continue funding it. As such, whenever presented with the question "Do we want it good or do we want it soon?" I am mostly choosing soon. BUT I am optimistic about the long-term viability of this project, and I do not wish to find future-David cursing the very name of past-David. In aid of squaring this particular circle, I am choosing to document every terrible thing that I knowingly do. The goals of this documentation are: * To make me feel bad, so that I'm less likely to do things that are awful but not actually needed. * To explain the reasoning to future-me and those who come after. * To make explicit the conditions under which the awful hack may be removed. ## Awful Hacks ### Panicky Clones Engine is currently set up to implement Clone but to panic when you call it. 
This is because [Helix seems to needlessly derive the Clone trait](https://github.com/tildeio/helix/issues/143). Can be removed when: That issue is fixed, or an alternative workaround is suggested. ### Threads as a Control Flow Mechanism Rather than attempt to encode the generation state machine explicitly, which was proving to be absolutely awful, I decided to continue to write it synchronously. The Rust side of the equation does not control calling the test function, which makes this tricky (and having the asynchronous interface as the main API entry point is a long term good anyway). The ideal way of doing this would be with something lightweight, like a coroutines. The ideal way of doing coroutines would be [Rust generators](https://doc.rust-lang.org/nightly/unstable-book/language-features/generators.html). Unfortunately this suffers from two problems: * It requires rust nightly. I would be prepared to live with this, but it's sub-par. * The current implementation is one-way only: resume does not take an argument. Alternate things tried: * [libfringe](https://github.com/edef1c/libfringe) seems lovely, but also requires rust-nightly and the released version doesn't actually build on rust nightly * I didn't really look into [may](https://github.com/Xudong-Huang/may/) after a) getting vaguely warned off it and b) Honestly having coroutine implementation exhaustion at this point. So at this point I said "Screw it, threads work on stable, and the context switching overhead isn't going to be *that* large compared to all the other mess that's in this chain, so..." So, yeah, that's why the main loop runs in a separate thread and communicates with the main thread via a synchronous channel. Can be removed when one of: * Generators are on stable and support resuming with an argument. * libfringe works on stable * Either of the above but on unstable, and my frustration with threading bugs (but fearless concurrency, David!) outweighs my desire to not use nightly. 
### Monkey-patching Helix for our Build I was very very bored of Helix's build support [not actually failing the rake task when the build fails](https://github.com/tildeio/helix/issues/133), so I've monkey-patched their build system in our Rakefile in order to make it error properly in this case. Can be removed when: The linked issue is fixed. ### Stable identifiers from RSpec Another "I did terrible things to RSpec" entry, sorry. RSpec's design here is totally reasonable and sensible and honestly probably *is* how you should pass state around, but seems to make it impossible to get access to the Example object from inside an it block without actually being the definer of the block. See `hypothesis_stable_identifier` for details, but basically I couldn't figure out how to get the name of a currently executing spec in RSPec from a helper function without some fairly brutal hacks where we extract information badly from self.inspect, because it's there stored as a string that gets passed in for inspection. Can be removed when: Someone shows me a better way, or a feature is added to RSpec to make this easier. hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/docs/design.md000066400000000000000000000117511354103617500254340ustar00rootroot00000000000000# Design Notes The current goals of the Hypothesis for Ruby project are: * To provide a useful but not wholly feature complete version of [Hypothesis](https://hypothesis.works/) for Ruby, that works with RSpec (and ideally minitest, but if that at any point proves to be a lot of work this may be dropped. It's not an explicit requirement, but supporting it now makes it much easier to find the right shape of the project design). * To provide a mostly feature complete version of the Conjecture engine that powers Hypothesis in Rust, as decoupled from that Ruby front-end as possible. Hypothesis for Ruby is not intended to be an exact feature for feature copy of the Python version. 
It will have a lot of the same underlying functionality, but with a number of changes driven by: * Trying to make it feel as "ruby native" as possible. * The ability to design an API from scratch that lacks many of the constraints imposed both by the earlier much more limited functionality of Hypothesis and the specifics of Python decorators and test frameworks. ## Differences The most fundamental API differences between Hypothesis for Python and Hypothesis for Ruby are: * In Python we do a whole giant song and dance about exposing functions for the test runner to call, while in Ruby we just have a function which repeatedly calls a block and then fails. * In Python you specify a bunch of given parameters up front, and then if you want values inline in the test you [explicitly opt in to it](https://hypothesis.readthedocs.io/en/latest/data.html#drawing-interactively-in-tests), while in Ruby this is not only the default but the only way to get those values. * Strategies are called Possibles because strategy is a terrible name that was originally intended to be internal and then leaked into the public API because I wasn't thinking hard about naming. * Many of the Possible implementations have different names than the corresponding names in hypothesis-python. There is also a weird dual naming convention for Possibles where there is both e.g. `integers` and `integer` as aliases for each other. So for example: ```ruby RSPec.describe "integer addition" do it "commutes" do hypothesis do m = any integer n = any integer expect(m + n).to eq(n + m) end end end ``` ```python @given(integers(), integers()) def test_integers_commute(m, n): assert m + n == n + m ``` The in-line style is slightly more verbose, but vastly more flexible and (I think) reads better. Also mixing in-line and up-front styles looks weird, and if we're going to have just one then the in-line approach is a strict superset of the functionality of the other. 
The main reason for these differences are: * Ruby blocks (and their relation to testing) make this approach much more natural. * This functionality was not actually possible when the Hypothesis for Python API was originally designed, which informed the way its API looks. ## Deliberate omissions The following are currently *not* part of the intended feature set of Hypothesis for Ruby: * Calls to `hypothesis` may not be nested. * There will be no equivalent to the [stateful testing](https://hypothesis.readthedocs.io/en/latest/stateful.html) (but the very interactive nature of tests in the Ruby API means that the generic state machine stuff is just something you can write in your normal tests). * Testing will not be coverage guided (to be fair, it's barely coverage guided in the Python version right now...) * There will probably not be a health check system as part of the initial release, or if there is it will be much more basic. * Any equivalent to [`@reproduce_failure`](https://hypothesis.readthedocs.io/en/latest/reproducing.html#reproducing-an-example-with-with-reproduce-failure) ## Possible omissions The following will be in this initial project on a "time permitting" basis: If everything else is going well and we've got plenty of time, I'll do them, but I'm currently anticipating a tightish schedule so these are probably for a future release: * Reporting multiple failing examples per test (this will definitely be supported in the core engine, and if it's easy to support it then it will also be included in the front-end. I currently think it will be easy, but if it's not it will be dropped). * [adding explicit examples](https://hypothesis.readthedocs.io/en/latest/reproducing.html#providing-explicit-examples). ## Current Project State The current state is best described as "nascent" - it demonstrates a lot of the right moving parts, but has rough edges that you will hit almost immediately if you try to use it. 
Those rough edges need to be filed off before it can be built. Things that don't work yet but will: * The Possible library is limited, and most of what is there is bad. * The shrinker is *very* primitive in comparison to in Python. * The example database does not yet exist. * It can't actually be installed as a gem! Note that even once it is installable you will need a rust compiler and cargo. hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/ext/000077500000000000000000000000001354103617500235045ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/ext/Makefile000066400000000000000000000000751354103617500251460ustar00rootroot00000000000000all: cd .. rake build clean: rm -rf ../target install: ; hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/ext/extconf.rb000066400000000000000000000002031354103617500254720ustar00rootroot00000000000000if !system('cargo --version') raise 'Hypothesis requires cargo to be installed (https://www.rust-lang.org/)' end require 'rake' hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/hypothesis-specs.gemspec000066400000000000000000000014541354103617500275670ustar00rootroot00000000000000# frozen_string_literal: true Gem::Specification.new do |s| s.name = 'hypothesis-specs' s.version = '0.2.0' s.date = '2018-10-24' s.description = <<~DESCRIPTION Hypothesis is a powerful, flexible, and easy to use library for property-based testing. DESCRIPTION s.summary = s.description s.authors = ['David R. 
Maciver'] s.email = 'david@drmaciver.com' s.files = Dir['{ext/*,src/**/*,lib/**/*}'] + [ 'Cargo.toml', 'LICENSE.txt', 'README.markdown', 'Rakefile', 'CHANGELOG.md' ] s.homepage = 'https://github.com/HypothesisWorks/hypothesis/tree/master/hypothesis-ruby' s.license = 'MPL-2.0' s.extensions = Dir['ext/extconf.rb'] s.add_dependency 'helix_runtime', '~> 0.7.0' s.add_runtime_dependency 'rake', '>= 10.0', '< 13.0' end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/lib/000077500000000000000000000000001354103617500234525ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/lib/hypothesis.rb000066400000000000000000000226021354103617500262000ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'hypothesis/junkdrawer' require_relative 'hypothesis/errors' require_relative 'hypothesis/possible' require_relative 'hypothesis/testcase' require_relative 'hypothesis/engine' require_relative 'hypothesis/world' # This is the main module for using Hypothesis. # It is expected that you will include this in your # tests, but its methods are also available on the # module itself. # # The main entry point for using this is the # {Hypothesis#hypothesis} method. All of the other # methods make sense only inside blocks passed to # it. module Hypothesis # @!visibility private HYPOTHESIS_LOCATION = File.dirname(__FILE__) # @!visibility private def hypothesis_stable_identifier # Attempt to get a "stable identifier" for any any # call into hypothesis. We use these to create # database keys (or will when we have a database) that # are stable across runs, so that when a test that # previously failed is rerun, we can fetch and reuse # the previous examples. # Note that essentially any answer to this method is # "fine" in that the failure mode is that sometiems we # just won't run the same test, but it's nice to keep # this as stable as possible if the code isn't changing. 
# Minitest makes it nice and easy to create a stable # test identifier, because it follows the classic xunit # pattern where a test is just a method invocation on a # fresh test class instance and it's easy to find out # which invocation that was. return "#{self.class.name}::#{@NAME}" if defined? @NAME # If we are running in an rspec example then, sadly, # rspec take the entirely unreasonable stance that # the correct way to pass data to a test is by passing # it as a function argument. Honestly, what is this, # Haskell? Ahem. Perfectly reasonable design decisions # on rspec's part, this creates some annoying difficulties # for us. We solve this through brute force and ignorance # by relying on the information we want being in the # inspect for the Example object, even if it's just there # as a string. begin is_rspec = is_a? RSpec::Core::ExampleGroup # We do our coverage testing inside rspec, so this will # never trigger! Though we also don't currently have a # test that covers it outside of rspec... # :nocov: rescue NameError is_rspec = false end # :nocov: if is_rspec return [ self.class.description, inspect.match(/"([^"]+)"/)[1] ].join(' ') end # Fallback time! We just walk the stack until we find the # entry point into code we control. This will typically be # where "hypothesis" was called. Thread.current.backtrace.each do |line| return line unless line.include?(Hypothesis::HYPOTHESIS_LOCATION) end # This should never happen unless something very strange is # going on. # :nocov: raise 'BUG: Somehow we have no caller!' # :nocov: end # Run a test using Hypothesis. # # For example: # # ```ruby # hypothesis do # x = any integer # y = any integer(min: x) # expect(y).to be >= x # end # ``` # # The arguments to `any` are `Possible` instances which # specify the range of value values for it to return. # # Typically you would include this inside some test in your # normal testing framework - e.g. in an rspec it block or a # minitest test method. 
# # This will run the block many times with integer values for # x and y, and each time it will pass because we specified that # y had a minimum value of x. # If we changed it to `expect(y).to be > x` we would see output # like the following: # # ``` # Failure/Error: expect(y).to be > x # # Given #1: 0 # Given #2: 0 # expected: > 0 # got: 0 # ``` # # In more detail: # # hypothesis calls its provided block many times. Each invocation # of the block is a *test case*. # A test case has three important features: # # * *givens* are the result of a call to self.any, and are the # values that make up the test case. These might be values such # as strings, integers, etc. or they might be values specific to # your application such as a User object. # * *assumptions*, where you call `self.assume(some_condition)`. If # an assumption fails (`some_condition` is false), then the test # case is considered invalid, and is discarded. # * *assertions* are anything that will raise an error if the test # case should be considered a failure. These could be e.g. RSpec # expectations or minitest matchers, but anything that throws an # exception will be treated as a failed assertion. # # A test case which satisfies all of its assumptions and assertions # is *valid*. A test-case which satisfies all of its assumptions but # fails one of its assertions is *failing*. # # A call to hypothesis does the following: # # 1. It first tries to *reuse* failing test cases for previous runs. # 2. If there were no previous failing test cases then it tries to # *generate* new failing test cases. # 3. If either of the first two phases found failing test cases then # it will *shrink* those failing test cases. # 4. Finally, it will *display* the shrunk failing test case by # the error from its failing assertion, modified to show the # givens of the test case. # # Reuse uses an internal representation of the test case, so examples # from previous runs will obey all of the usual invariants of generation. 
# However, this means that if you change your test then reuse may not # work. Test cases that have become invalid or passing will be cleaned # up automatically. # # Generation consists of randomly trying test cases until one of # three things has happened: # # 1. It has found a failing test case. At this point it will start # *shrinking* the test case (see below). # 2. It has found enough valid test cases. At this point it will # silently stop. # 3. It has found so many invalid test cases that it seems unlikely # that it will find any more valid ones in a reasonable amount of # time. At this point it will either silently stop or raise # `Hypothesis::Unsatisfiable` depending on how many valid # examples it found. # # *Shrinking* is when Hypothesis takes a failing test case and tries # to make it easier to understand. It does this by replacing the givens # in the test case with smaller and simpler values. These givens will # still come from the possible values, and will obey all the usual # constraints. # In general, shrinking is automatic and you shouldn't need to care # about the details of it. If the test case you're shown at the end # is messy or needlessly large, please file a bug explaining the problem! # # @param max_valid_test_cases [Integer] The maximum number of valid test # cases to run without finding a failing test case before stopping. # # @param database [String, nil, false] A path to a directory where Hypothesis # should store previously failing test cases. If it is nil, Hypothesis # will use a default of .hypothesis/examples in the current directory. # May also be set to false to disable the database functionality. def hypothesis(max_valid_test_cases: 200, database: nil, &block) unless World.current_engine.nil? 
raise UsageError, 'Cannot nest hypothesis calls' end begin World.current_engine = Engine.new( hypothesis_stable_identifier, max_examples: max_valid_test_cases, database: database ) World.current_engine.run(&block) ensure World.current_engine = nil end end # Supplies a value to be used in your hypothesis. # @note It is invalid to call this method outside of a hypothesis block. # @return [Object] A value provided by the possible argument. # @param possible [Possible] A possible that specifies the possible values # to return. # @param name [String, nil] An optional name to show next to the result on # failure. This can be helpful if you have a lot of givens in your # hypothesis, as it makes it easier to keep track of which is which. def any(possible, name: nil, &block) if World.current_engine.nil? raise UsageError, 'Cannot call any outside of a hypothesis block' end World.current_engine.current_source.any( possible, name: name, &block ) end # Specify an assumption of your test case. Only test cases which satisfy # their assumptions will treated as valid, and all others will be # discarded. # @note It is invalid to call this method outside of a hypothesis block. # @note Try to use this only with "easy" conditions. If the condition is # too hard to satisfy this can make your testing much worse, because # Hypothesis will have to retry the test many times and will struggle # to find "interesting" test cases. For example `assume(x != y)` is # typically fine, and `assume(x == y)` is rarely a good idea. # @param condition [Boolean] The condition to assume. If this is false, # the current test case will be treated as invalid and the block will # exit by throwing an exception. The next test case will then be run # as normal. def assume(condition) if World.current_engine.nil? 
raise UsageError, 'Cannot call assume outside of a hypothesis block' end World.current_engine.current_source.assume(condition) end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/lib/hypothesis/000077500000000000000000000000001354103617500256515ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/lib/hypothesis/engine.rb000066400000000000000000000062071354103617500274500ustar00rootroot00000000000000# frozen_string_literal: true require 'helix_runtime' require_relative '../hypothesis-ruby/native' require 'rspec/expectations' module Hypothesis DEFAULT_DATABASE_PATH = File.join(Dir.pwd, '.hypothesis', 'examples') class Engine include RSpec::Matchers attr_reader :current_source attr_accessor :is_find def initialize(name, options) seed = Random.rand(2**64 - 1) database = options.fetch(:database, nil) database = DEFAULT_DATABASE_PATH if database.nil? database = nil if database == false @core_engine = HypothesisCoreEngine.new( name, database, seed, options.fetch(:max_examples) ) @exceptions_to_tags = Hash.new { |h, k| h[k] = h.size } end def run loop do core = @core_engine.new_source break if core.nil? @current_source = TestCase.new(core) begin result = yield(@current_source) if is_find && result @core_engine.finish_interesting(core, 0) else @core_engine.finish_valid(core) end rescue UnsatisfiedAssumption @core_engine.finish_invalid(core) rescue DataOverflow @core_engine.finish_overflow(core) rescue Exception => e raise if is_find key = [ e.class, HypothesisJunkDrawer.find_first_relevant_line(e.backtrace) ] @core_engine.finish_interesting(core, @exceptions_to_tags[key]) end end if @core_engine.count_failing_examples.zero? 
raise Unsatisfiable if @core_engine.was_unsatisfiable @current_source = nil return end if is_find core = @core_engine.failing_example(0) @current_source = TestCase.new(core, record_draws: true) yield @current_source else exceptions = [] (0...@core_engine.count_failing_examples).each do |example| core = @core_engine.failing_example(example) @current_source = TestCase.new(core, print_draws: true) begin yield @current_source rescue Exception => e givens = @current_source.print_log given_str = givens.each_with_index.map do |(name, s), i| name = "##{i + 1}" if name.nil? "Given #{name}: #{s}" end.to_a if e.respond_to? :hypothesis_data e.hypothesis_data[0] = given_str else original_to_s = e.to_s original_inspect = e.inspect class < 0 # i = any element_of(ls) # [ls, i] # end # ``` # # @return [Possible] A Possible whose possible values are # any result from the passed block. def built_as(&block) Hypothesis::Possible::Implementations::CompositePossible.new(block) end alias values_built_as built_as # A Possible boolean value # @return [Possible] def booleans integers(min: 0, max: 1).map { |i| i == 1 } end alias boolean booleans # A Possible unicode codepoint. # @return [Possible] # @param min [Integer] The smallest codepoint to provide # @param max [Integer] The largest codepoint to provide def codepoints(min: 1, max: 1_114_111) base = integers(min: min, max: max) if min <= 126 from(integers(min: min, max: [126, max].min), base) else base end end alias codepoint codepoints # A Possible String # @return [Possible] # @param codepoints [Possible, nil] The Possible codepoints # that can be found in the string. If nil, # will default to self.codepoints. These # will be further filtered to ensure the generated string is # valid. 
# @param min_size [Integer] The smallest valid length for a # provided string # @param max_size [Integer] The largest valid length for a # provided string def strings(codepoints: nil, min_size: 0, max_size: 10) codepoints = self.codepoints if codepoints.nil? codepoints = codepoints.select do |i| begin [i].pack('U*').codepoints true rescue ArgumentError false end end arrays(of: codepoints, min_size: min_size, max_size: max_size).map do |ls| ls.pack('U*') end end alias string strings # A Possible Hash, where all possible values have a fixed # shape. # This is used for hashes where you know exactly what the # keys are, and different keys may have different possible values. # For example, hashes_of_shape(a: integers, b: booleans) # will give you values like `{a: 11, b: false}`. # @return [Possible] # @param hash [Hash] A hash describing the values to provide. # The keys will be present unmodified in the provided hashes, # mapping to their Possible value in the result. def hashes_of_shape(hash) built_as do result = {} hash.each { |k, v| result[k] = any(v) } result end end alias hash_of_shape hashes_of_shape # A Possible Hash of variable shape. # @return [Possible] # @param keys [Possible] the possible keys # @param values [Possible] the possible values def hashes_with(keys:, values:, min_size: 0, max_size: 10) built_as do result = {} rep = HypothesisCoreRepeatValues.new( min_size, max_size, (min_size + max_size) * 0.5 ) source = World.current_engine.current_source while rep.should_continue(source) key = any keys if result.include?(key) rep.reject else result[key] = any values end end result end end alias hash_with hashes_with # A Possible Arrays of a fixed shape. # This is used for arrays where you know exactly how many # elements there are, and different values may be possible # at different positions. # For example, arrays_of_shape(strings, integers) # will give you values like ["a", 1] # @return [Possible] # @param elements [Array] A variable number of Possible. 
# values. The provided array will have this many values, with # each value possible for the corresponding argument. If elements # contains an array it will be flattened first, so e.g. # arrays_of_shape(a, b) is equivalent to arrays_of_shape([a, b]) def arrays_of_shape(*elements) elements = elements.flatten built_as do elements.map { |e| any e }.to_a end end alias array_of_shape arrays_of_shape # A Possible Array of variable shape. # This is used for arrays where the size may vary and the same values # are possible at any position. # For example, arrays(of: booleans) might provide [false, true, false]. # @return [Possible] # @param of [Possible] The possible elements of the array. # @param min_size [Integer] The smallest valid size of a provided array # @param max_size [Integer] The largest valid size of a provided array def arrays(of:, min_size: 0, max_size: 10) built_as do result = [] rep = HypothesisCoreRepeatValues.new( min_size, max_size, (min_size + max_size) * 0.5 ) source = World.current_engine.current_source result.push any(of) while rep.should_continue(source) result end end alias array arrays # A Possible where the possible values are any one of a number # of other possible values. # For example, from(strings, integers) could provide either of "a" # or 1. # @note This has a slightly non-standard aliasing. It reads more # nicely if you write `any from(a, b, c)` but e.g. # `arrays(of: mix_of(a, b, c))`. # # @return [Possible] # @param components [Array] A number of Possible values, # where the result will include any value possible from any of # them. If components contains an # array it will be flattened first, so e.g. from(a, b) # is equivalent to from([a, b]) def from(*components) components = components.flatten indexes = from_hypothesis_core( HypothesisCoreBoundedIntegers.new(components.size - 1) ) built_as do i = any indexes any components[i] end end alias mix_of from # A Possible where any one of a fixed array of values is possible. 
# @note these values are provided as is, so if the provided # values are mutated in the test you should be careful to make # sure each test run gets a fresh value (if you use this Possible # in line in the test you don't need to worry about this, this # is only a problem if you define the Possible outside of your # hypothesis block). # @return [Possible] # @param values [Enumerable] A collection of possible values. def element_of(values) values = values.to_a indexes = from_hypothesis_core( HypothesisCoreBoundedIntegers.new(values.size - 1) ) built_as do values.fetch(any(indexes)) end end alias elements_of element_of # A Possible integer # @return [Possible] # @param min [Integer] The smallest value integer to provide. # @param max [Integer] The largest value integer to provide. def integers(min: nil, max: nil) base = from_hypothesis_core HypothesisCoreIntegers.new if min.nil? && max.nil? base elsif min.nil? built_as { max - any(base).abs } elsif max.nil? built_as { min + any(base).abs } else bounded = from_hypothesis_core( HypothesisCoreBoundedIntegers.new(max - min) ) if min.zero? bounded else built_as { min + any(bounded) } end end end alias integer integers private def from_hypothesis_core(core) Hypothesis::Possible::Implementations::PossibleFromCore.new( core ) end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/lib/hypothesis/testcase.rb000066400000000000000000000022671354103617500300200ustar00rootroot00000000000000# frozen_string_literal: true module Hypothesis # A TestCase class provides a concrete representation of # an executing test case. You do not normally need to use this # within the body of the test, but it exists to be used as # an argument to {Hypothesis::Possibilities::built_as}. 
# @!visibility private class TestCase # @!visibility private attr_reader :draws, :print_log, :print_draws, :wrapped_data # @!visibility private def initialize(wrapped_data, print_draws: false, record_draws: false) @wrapped_data = wrapped_data @draws = [] if record_draws @print_log = [] if print_draws @depth = 0 end def assume(condition) raise UnsatisfiedAssumption unless condition end # @!visibility private def any(possible = nil, name: nil, &block) top_level = @depth.zero? begin @depth += 1 possible ||= block @wrapped_data.start_draw result = possible.provide(&block) @wrapped_data.stop_draw if top_level draws&.push(result) print_log&.push([name, result.inspect]) end result ensure @depth -= 1 end end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/lib/hypothesis/world.rb000066400000000000000000000002121354103617500273200ustar00rootroot00000000000000# frozen_string_literal: true module Hypothesis module World class << self attr_accessor :current_engine end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/minitests/000077500000000000000000000000001354103617500247235ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/minitests/test_multiple_failures.rb000066400000000000000000000010421354103617500320310ustar00rootroot00000000000000# frozen_string_literal: true require 'minitest/autorun' require 'hypothesis' class TestMultipleFailures < Minitest::Test include Hypothesis include Hypothesis::Possibilities def test_multiple_failures assert_raises(Hypothesis::MultipleExceptionError) do @initial = nil hypothesis do x = any integers if @initial.nil? 
if x >= 1000 @initial = x else next end end assert(x != @initial) raise 'Nope' end end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/minitests/test_stable_identifier.rb000066400000000000000000000003611354103617500317630ustar00rootroot00000000000000# frozen_string_literal: true require 'minitest/autorun' require 'hypothesis' class TestIdentifiers < Minitest::Test include Hypothesis def test_abc assert_equal hypothesis_stable_identifier, 'TestIdentifiers::test_abc' end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/scripts/000077500000000000000000000000001354103617500243735ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/scripts/run-tests-isolated.sh000077500000000000000000000005021354103617500304750ustar00rootroot00000000000000#!/usr/bin/env bash set -e -o xtrace rm -rf isolated mkdir isolated bundle exec rake gem mv hypothesis-specs*.gem isolated cp -Rl .rspec spec isolated cd isolated mkdir gems export GEM_HOME="$PWD"/gems export GEM_PATH="$GEM_HOME" gem install ./hypothesis-specs*.gem gem install rspec simplecov gems/bin/rspec spec hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/secrets.tar.enc000066400000000000000000000240201354103617500256260ustar00rootroot00000000000000,??ΗG7:qZ&ߺGczyw kYi9p|kz;o ss! r]UY wͱ C /)4K= ?:0T`摰3G)I#P.WJF7< 5Oxb5j8ȋGjtpyO0uE}i_\-(IT`+!$*@6?xtw7P4^308#Uy쾽hϨ2i[7ּn2kё,LR"FB󤛒fx(dT 8N?۵F\0EAٵú!+wfE 0;K6Taa/]\Z5CX?8!8<7:k'Z3-ZbzO7ߚqܢE8?W̿hH+ބ_yAI20B+X"]_Q3._T@Ĺz&LBH y8pCJQS(+YKlRbԱk ^DҼHmg죹Bo]$a{?'*|;e31B.oH;6/>,;/H!,u|΋KZr[>Շ 6AZ ig_T;ۥ7?>%0LSQ=Z_VBdhFO =FKTͬPfuabuWH坌vLPʻO#*Aٸ._%b'{錏VPÒ7&kŠLL#qǔ PVۤԬݴdO6Èa14U>̣NUl܋o$Y)Lo{i9z5O⡔x(4~SھD@xxXh,p^Re[% ,LGN("B`UimEEqG4 BȐ8W[5O5L\R #BSL6Ȭ ^/:QbƤY> 41Gn).Җ`UNO@s"9xv6]7CKO,W2dnY :U8uR7[̾xT1x6XC-qM2#{;_o$ƑbYByC+5 [_9N\ayYR2~+ PݵfzYa aղ^r? NW 1 1=ix 1)p:dQLrOoi'xX 0fyY>Su| kwtG%d$qRk<(Xe竟4pGuq5 )+1xқFcDҰp`y_q4X2,O+7c$+.%rvz.g+2fi5"Tc cn-P sTKR1`RLc,ԅ'w9尯d>Md#MVkZ$-GΥG-i9dv gWc"bN D{~SD`vrLDd'W9Ip8ya +K}. ^A͹ˡs MxI$pU![J,|T"WjuenIC,utpc0! 
_ZYztt`{O.?sJoxW|gy6ھWGt% pɋCE=[j;?7Xw@Ja*qo|u-Ztުz9= :FNG Tő$=n3!sS.p{IIQ@e^>S&ԉ[[~=g7ANxRaO}'$'ʕ>`Oh.[Qu#.4uP>ɔ ~}N6¾sw'Z*!;~]w4"׎F@VSؗ[cs;}qO#\t)@gYK2%D d ?F$])v+ #%_'픸՝0'ᴞKRb8QYD35W”@Xń &BNʍ06F~ZPM=w Q¹zbZ:y\i/L3D 3S8XO.(7nXmwٍ^{P7j VomYc_'pnYQ (T}1S&+#+6Ex#(Mr6^穰[cpkrcH:w%0o4huBW᰺eJXD_#}b{HˑG~H,b_Ice|gMHWlZ?$FG^qe@.(k]}"|hW)nD%X>`D6XI4(mX7! Z;A??9Yk݉0xtTB h\w]_$٘  q^-9+7"hh[l[É0$ao Jsej\?_;6# rj BSyr]ILzj%Q&;G[qgֿjvG`M h.ID д+n9bTqm:!q܈F\ݩQ7wEAȌ]wO{DW4w[Bw(ssͯHic[lu{Mn:Q6!_ٹ;79*IWBc`ĽF*M^[%Lx3g>(./ h0%d`(GEQ#a רq,"`"R~L+7/NpuTSA)^kGgX{g`eғטeL_x7 Ok)}I[BzmRϖM_SFXp<UKX hqT l #SI{ 8U-%& )"RvaYEyP=7F=ZQ|kNۀ?(gs"yz-}*,g#}\W2]N@تdQ NX:&==^|%=^y$7 BC9+Mr]<$p4]b,ҍwߠşq}jBÂ\˲lH/^ե^Vs9GŁ}HU1z&& (YtzZ]A{vaB]S!s0⡽T@'AsF 1a b::xi)J};i;lh$;!(`jGvEt Ȇ6,^Y[qKV(eOѷFcIJ0:,̝ Y)0ZWvDu@`YHTSOg{;{M7e oCb *=q1̦>س7V_S._9#4,\f@@&a13Ȋ׊2f΂WI;(Цs$$N#4&nI~*cVi5.T}IL #IE֠Y62B%=nI‡[4* i18 .(3@pg ҀAfR}Z$d+[8L)O*/@T#ʼnR_>E%7ɢ"S?( XG3|VsK$ؗ4f _$.$1Fds;! ʌTsU}hV߅pL١ bgʈEfzЯW~w*%h7]I[[7+5X=?P GԠ+#vL2m/{Մq7Yjg@h#VKWeM@H|+ԩ@;qXY-}UC?STo)GUg[X[DQƠncy'B6kҪ$L%g5!訳j,~=j7 nI{aᝧ&3<:x ƛO93N**+>+YA!/xmt"Ls_k* ˙b3RUC" iF'9JP*y%? 5<q@ŏNf`Q(0R8/]N!㸱8M5 @XP.Dw4={IJvr?@ Ќ=vu0Z)(o )(Ӏy.=! {B'A@]^HMOp8nJ̌4>!%X]5ޅnu#5u(WcMl*@&Z#x\)PVɼ=Iqp)nJ neWTS-lEyA.iGuǭIHQ9j?쟚Ahw`:&GkЮ9zi Y\ KtF=H~7a\ZP6^L#GyLJN.ƒ#!jPhY(v^Xh#_iufqg)NҦs#]s?S_.ٴ*̘뾢IWS6ι1Qj fgpMtX˯CN,{FKZvim*hvJcf!p^^#.99BϪ,ke+GS^ ȴ$JXŝ e :f\~:k*g$@udhw4E^ױxQqDp6GKxP[`XJ[. 
O?fm%UoDHŗj#&5O۹;X38JDD5McE&O*C>E?5{\AIv+šӤ@;V2~\nİK9]Z P@[(ڍ+yD62uMQJ;[AKmه rnr=joP``.# ))W@wg\zmb&GW9a"`үDsu樬7q m2Wt ճ4sC)gÖDj(/὘iU 0S ÅD`?u*kZTUх,cHn#ʅ Pu !IlX,PkgԥWP/ʗsc 4]Ƚ~?֯!mXrSB 8U/4a\t qGvˢ/Ĝld9mO%UpAwD䫈ZܠO!ڣrDm qܘzT>!HIH}} X +08X8]+t Գ$E ޔەUU|AXPX@ǼVm/ITp~M|9aN7t8DIѝcbFēB:sʍ uBk<U2.pOdČl,sjs_M[J؈z<3H0-Ks\Khp%2"KsΒPۂ 쀇,WQ&s2Ic:s|pԁIS%Sar *yf4/ p_+YՙWtkC[h@^C\0%$v)e;Yۓ7͌}6_,5{K-7$Qw"W62,TơguWe!QU/ztqJؿXD WA6&7z6 L2 qs@4n\uNS4$Ob[3#2Url= ݩX@';(zĻ0cDQ'Uvrh BP1ÚEïe!=ljf٤WT"}ѢXih~l%3 5I2zZNy?[{|bN 5hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/000077500000000000000000000000001354103617500236365ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/arrays_spec.rb000066400000000000000000000005411354103617500264760ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'fixed arrays' do they 'are of fixed size and shape' do hypothesis do ls = any array_of_shape( integer, string, integer ) expect(ls.size).to eq(3) expect(ls[0]).to be_a(Integer) expect(ls[2]).to be_a(Integer) expect(ls[1]).to be_a(String) end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/backtrace_spec.rb000066400000000000000000000011041354103617500271100ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'backtrace manipulation' do JD = Hypothesis::HypothesisJunkDrawer it 'identifies the test file as relevant' do JD.find_first_relevant_line(caller).include?('backtrace_spec.rb') end it 'prunes out hypothesis and rspec related lines' do hypothesis do relevant = JD.prune_backtrace(caller) relevant.each do |e| expect(e).to_not include(JD::HYPOTHESIS_ROOT) expect(e).to_not include('/rspec-core/') end expect(relevant.grep(/backtrace_spec.rb/)).to_not be_empty end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/bad_usage_spec.rb000066400000000000000000000012611354103617500271070ustar00rootroot00000000000000# frozen_string_literal: true def bad_usage(&block) expect(&block).to 
raise_exception(Hypothesis::UsageError) end RSpec.describe 'Incorrect usage' do it 'includes nesting hypothesis calls' do bad_usage do hypothesis do hypothesis do end end end end it 'includes using any outside a hypothesis call' do bad_usage { any integers } end it 'includes using assume outside a hypothesis call' do bad_usage { assume true } end it 'includes using find inside a hypothesis' do class <= 0 } end end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/basic_examples_spec.rb000066400000000000000000000020441354103617500301540ustar00rootroot00000000000000# frozen_string_literal: true def expect_failure(&block) expect(&block).to raise_exception(RSpec::Expectations::ExpectationNotMetError) end RSpec.describe 'basic hypothesis tests' do they 'think integer addition is commutative' do hypothesis do x = any integers y = any integers expect(x + y).to eq(y + x) end end they 'are able to find zero values' do expect_failure do hypothesis do x = any integers expect(x).not_to eq(0) end end end they 'are able to filter out values' do hypothesis do x = any integers assume x != 0 1 / x end end they 'find that string addition is not commutative' do expect_failure do hypothesis do x = any strings y = any strings expect(x + y).to be == y + x end end end they 'raise unsatisfiable when all assumptions fail' do expect do hypothesis do any integers assume false end end.to raise_exception(Hypothesis::Unsatisfiable) end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/boolean_spec.rb000066400000000000000000000003311354103617500266110ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'booleans' do include Hypothesis::Debug they 'can be true' do find_any { any booleans } end they 'can be false' do find_any { !any(booleans) } end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/choice_spec.rb000066400000000000000000000005221354103617500264260ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 
'element_of possible' do include Hypothesis::Debug it 'includes the first argument' do find_any do m = any element_of([0, 1]) m == 0 end end it 'includes the last argument' do find_any do m = any element_of([0, 1, 2, 3]) m == 3 end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/database_spec.rb000066400000000000000000000035501354103617500267440ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'database usage' do it 'saves a minimal failing example' do expect do hypothesis do n = any integer expect(n).to be < 10 end end.to raise_exception(RSpec::Expectations::ExpectationNotMetError) saved = Dir.glob("#{Hypothesis::DEFAULT_DATABASE_PATH}/*/*") expect(saved.length).to be == 1 end it 'can be disabled' do expect do hypothesis(database: false) do n = any integer expect(n).to be < 10 end end.to raise_exception(RSpec::Expectations::ExpectationNotMetError) expect(File.exist?(Hypothesis::DEFAULT_DATABASE_PATH)).to be false end it 'replays a previously failing example' do # This is a very unlikely value to be hit on by random. The first # time we run the test we fail for any value larger than it. # This then shrinks to exactly equal to magic. The second time we # run the test we only fail in this exact magic value. This # demonstrates replay from the previous test is working. 
magic = 17_658 expect do hypothesis do n = any integer expect(n).to be < magic end end.to raise_exception(RSpec::Expectations::ExpectationNotMetError) expect do hypothesis do n = any integer expect(n).not_to be == magic end end.to raise_exception(RSpec::Expectations::ExpectationNotMetError) end it 'cleans out passing examples' do expect do hypothesis do n = any integer expect(n).to be < 10 end end.to raise_exception(RSpec::Expectations::ExpectationNotMetError) saved = Dir.glob("#{Hypothesis::DEFAULT_DATABASE_PATH}/*/*") expect(saved.length).to be == 1 hypothesis do any integer end saved = Dir.glob("#{Hypothesis::DEFAULT_DATABASE_PATH}/*/*") expect(saved.length).to be == 0 end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/debug_spec.rb000066400000000000000000000004311354103617500262610ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'find' do include Hypothesis::Debug it "raises an error if it can't find anything" do expect do find do any integers false end end.to raise_exception(Hypothesis::Debug::NoSuchExample) end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/example_discovery_spec.rb000066400000000000000000000003611354103617500307170ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'hypothesis' do include Hypothesis::Debug it 'can find mid sized integers' do n, = find do m = any(integers) m >= 100 && m <= 1000 end expect(n).to eq(100) end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/example_printing_spec.rb000066400000000000000000000033141354103617500305430ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'printing examples' do it 'adds a statement to the exceptions string' do expect do hypothesis do n = any integers expect(n).to eq(0) end end.to raise_exception(/Given #1/) end it 'adds multiple statements to the exceptions string' do expect do hypothesis do n = any integers m = any integers expect(n).to eq(m) end end.to raise_exception(/Given 
#1.+Given #2/m) end it 'includes the name in the Given' do expect do hypothesis do n = any integers, name: 'fred' expect(n).to eq(1) end end.to raise_exception(/Given fred:/) end it 'does not mangle names if you reuse exceptions' do shared = Exception.new('Stuff') 3.times do expect do hypothesis do any integers raise shared end end.to raise_exception do |ex| expect(ex).to equal(shared) expect(ex.to_s.scan(/Given/).count).to eq(1) expect(ex.to_s.scan(/Stuff/).count).to eq(1) end end end it 'does not include nested anys in printing' do expect do hypothesis do value = any built_as do any integers any integers any integers end expect(value).to eq(0) end end.to raise_exception(RSpec::Expectations::ExpectationNotMetError) do |ex| expect(ex.to_s.scan(/Given/).count).to eq(1) end end it 'includes Given in inspect as well as to_s' do expect do hypothesis do n = any integers expect(n).to eq(0) end end.to raise_exception do |ex| expect(ex.inspect).to match(/Given #1/) end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/example_shrinking_spec.rb000066400000000000000000000025201354103617500307030ustar00rootroot00000000000000# frozen_string_literal: true require 'set' RSpec.describe 'shrinking' do include Hypothesis::Debug it 'finds lower bounds on integers' do n, = find { any(integers) >= 10 } expect(n).to eq(10) end it 'iterates to a fixed point' do @original = nil a, b = find do m = any integers n = any integers m > n && n > 0 end expect(a).to eq(2) expect(b).to eq(1) end it 'can shrink through a chain' do ls, = find do x = any built_as do n = any integers(min: 1, max: 100) any arrays(of: integers(min: 0, max: 10), min_size: n, max_size: n) end x.sum >= 50 end expect(ls).to_not include(0) end it 'can shrink through a chain without deleting first element' do 10.times do ls, = find do x = any built_as do n = any integers(min: 1, max: 100) any arrays(of: integers(min: 0, max: 10), min_size: n, max_size: n) end assume x[0] > 0 x.sum >= 50 end expect(ls).to_not 
include(0) end end it 'can shrink duplicate elements' do 10.times do ls, = find do x = any array(of: integers(min: 0, max: 100)) significant = x.select { |n| n > 0 } Set.new(significant).length < significant.length end expect(ls).to eq([1, 1]) end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/hashes_spec.rb000066400000000000000000000010631354103617500264500ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'fixed hash possibles' do they 'include all the keys' do hypothesis do x = any hash_of_shape(a: integers, b: integers) expect(x.size).to eq(2) expect(x[:a]).to be_a(Integer) expect(x[:b]).to be_a(Integer) end end end RSpec.describe 'variable hash possibles' do they 'respect lower bounds' do hypothesis do x = any hash_with( keys: integers(min: 0, max: 4), values: strings, min_size: 4 ) expect(x.size).to be >= 4 end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/integer_spec.rb000066400000000000000000000012761354103617500266400ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'integer possibles' do they 'respect upper bounds' do hypothesis do expect(any(integers(max: 100))).to be <= 100 end end they 'respect lower bounds' do hypothesis do expect(any(integers(min: -100))).to be >= -100 end end they 'respect both bounds at once when lower bound is zero' do hypothesis do n = any integers(min: 0, max: 100) expect(n).to be <= 100 expect(n).to be >= 0 end end they 'respect both bounds at once when lower bound is non-zero' do hypothesis do n = any integers(min: 1, max: 100) expect(n).to be <= 100 expect(n).to be >= 1 end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/mixing_spec.rb000066400000000000000000000005151354103617500264710ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'from possible' do include Hypothesis::Debug it 'includes the first argument' do find_any do any(from(integers, strings)).is_a? 
Integer end end it 'includes the second argument' do find_any do any(from(integers, strings)).is_a? String end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/multiple_failures_spec.rb000066400000000000000000000016561354103617500307320ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'tests with multiple failures' do they 'show multiple failures' do expect do @initial = nil hypothesis do x = any integers if @initial.nil? if x >= 1000 @initial = x else next end end expect(x).to_not eq(@initial) raise 'Nope' end end.to raise_exception(Hypothesis::MultipleExceptionError) { |e| expect(e.all_exceptions.length).to eq(2) } end end RSpec.describe Hypothesis::MultipleExceptionError do it 'includes the message from each exception' do exceptions = [] %w[hello world].each do |m| begin raise m rescue Exception => e exceptions.push(e) end end e = Hypothesis::MultipleExceptionError.new(*exceptions) expect(e.message).to include('hello') expect(e.message).to include('world') end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/provided_list_spec.rb000066400000000000000000000011731354103617500300460ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'shrinking' do include Hypothesis::Debug include Hypothesis::Possibilities it 'finds a small list' do ls, = find { any(arrays(of: integers)).length >= 2 } expect(ls).to eq([0, 0]) end it 'shrinks a list to its last element' do 10.times do @original_target = nil ls, = find do v = any(arrays(of: integers)) if v.length >= 5 && @original_target.nil? && v[-1] > 0 @original_target = v end !@original_target.nil? 
&& v && v[-1] == @original_target[-1] end expect(ls.length).to eq(1) end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/spec_helper.rb000066400000000000000000000056331354103617500264630ustar00rootroot00000000000000# frozen_string_literal: true require 'simplecov' SimpleCov.minimum_coverage 100 class PrintingFormatter # Takes a SimpleCov::Result and generates a string out of it def format(result) bad = [] result.files.each do |file| bad.push file if file.covered_percent < 100.0 end unless bad.empty? puts 'Files with missing coverage!' bad.each do |file| lines = file.source_lines.select { |l| l.coverage == 0 } .map(&:line_number).sort s = lines[0] groups = [[s, s]] lines.each do |i| if i <= groups[-1][-1] + 1 groups[-1][-1] = i else groups.push([i, i]) end end markers = [] groups.each do |g| if g[0] == g[1] markers.push(g[0].to_s) else markers.push(g.join('-')) end end puts "#{file.filename}: #{markers.join(', ')}" end end end end SimpleCov.formatters = SimpleCov::Formatter::MultiFormatter.new( [SimpleCov::Formatter::HTMLFormatter, PrintingFormatter] ) SimpleCov.start do add_filter do |source_file| name = source_file.filename !(name.include?('/hypothesis-ruby/lib/') || name.end_with?('hypothesis.rb')) end end require 'hypothesis' module Hypothesis module Debug class NoSuchExample < HypothesisError end def find(options = {}, &block) unless Hypothesis::World.current_engine.nil? raise UsageError, 'Cannot nest hypothesis calls' end begin Hypothesis::World.current_engine = Hypothesis::Engine.new( 'find', max_examples: options.fetch(:max_examples, 1000) ) Hypothesis::World.current_engine.is_find = true Hypothesis::World.current_engine.run(&block) source = Hypothesis::World.current_engine.current_source raise NoSuchExample if source.nil? source.draws ensure Hypothesis::World.current_engine = nil end end def find_any(options = {}, &block) # Currently the same as find, but once we have config # options for shrinking it will turn that off. 
find(options, &block) end end end RSpec.configure do |config| config.expect_with :rspec do |expectations| expectations.include_chain_clauses_in_custom_matcher_descriptions = true end config.alias_example_to :they config.mock_with :rspec do |mocks| mocks.verify_partial_doubles = true end config.shared_context_metadata_behavior = :apply_to_host_groups config.example_status_persistence_file_path = 'spec/examples.txt' config.disable_monkey_patching! config.warnings = true config.default_formatter = 'doc' config.profile_examples = 10 config.order = :random Kernel.srand config.seed config.include(Hypothesis) config.include(Hypothesis::Possibilities) config.before(:each) do FileUtils.rm_rf Hypothesis::DEFAULT_DATABASE_PATH end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/stable_identifier_spec.rb000066400000000000000000000007171354103617500306560ustar00rootroot00000000000000# frozen_string_literal: true class SomeClass include Hypothesis def stuff hypothesis_stable_identifier end end RSpec.describe 'stable identifiers' do it 'are the full rspec string' do expect(hypothesis_stable_identifier).to eq( 'stable identifiers are the full rspec string' ) end it 'fall back to a traceback' do ident = SomeClass.new.stuff expect(ident).to include(__FILE__) expect(ident).to include('6') end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/spec/strings_spec.rb000066400000000000000000000013301354103617500266630ustar00rootroot00000000000000# frozen_string_literal: true RSpec.describe 'strings' do they 'respect a non-ascii lower bound' do hypothesis do expect(any(codepoints(min: 127))).to be >= 127 end end end RSpec.describe 'strings' do include Hypothesis::Debug they 'can be ascii' do find_any do s = any(strings(min_size: 3, max_size: 3)) s.codepoints.all? { |c| c < 127 } end end they 'can be non-ascii' do find_any do any(strings).codepoints.any? 
{ |c| c > 127 } end end they 'produce valid strings' do find do s = any(strings) # Shrinking will try and fail to move this into # an invalid codepoint range. !s.empty? && s.codepoints[0] >= 56_785 end end end hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/src/000077500000000000000000000000001354103617500234735ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/hypothesis-ruby/src/lib.rs000066400000000000000000000123231354103617500246100ustar00rootroot00000000000000// "Bridging" root code that exists exclusively to provide // a ruby -> Hypothesis engine binding. #![recursion_limit = "256"] #![deny(warnings, missing_debug_implementations)] extern crate core; #[macro_use] extern crate helix; extern crate rand; extern crate conjecture; use std::mem; use conjecture::data::{DataSource, Status, TestResult}; use conjecture::distributions::Repeat; use conjecture::distributions; use conjecture::engine::Engine; use conjecture::database::{BoxedDatabase, NoDatabase, DirectoryDatabase}; ruby! 
{ class HypothesisCoreDataSource { struct { source: Option, } def initialize(helix, engine: &mut HypothesisCoreEngine){ let mut result = HypothesisCoreDataSource{helix, source: None}; mem::swap(&mut result.source, &mut engine.pending); return result; } def start_draw(&mut self){ if let &mut Some(ref mut source) = &mut self.source { source.start_draw(); } } def stop_draw(&mut self){ if let &mut Some(ref mut source) = &mut self.source { source.stop_draw(); } } } class HypothesisCoreEngine { struct { engine: Engine, pending: Option, interesting_examples: Vec, } def initialize(helix, name: String, database_path: Option, seed: u64, max_examples: u64){ let xs: [u32; 2] = [seed as u32, (seed >> 32) as u32]; let db: BoxedDatabase = match database_path { None => Box::new(NoDatabase), Some(path) => Box::new(DirectoryDatabase::new(path)), }; HypothesisCoreEngine{ helix, engine: Engine::new(name, max_examples, &xs, db), pending: None, interesting_examples: Vec::new(), } } def new_source(&mut self) -> Option { match self.engine.next_source() { None => { self.interesting_examples = self.engine.list_minimized_examples(); None }, Some(source) => { self.pending = Some(source); Some(HypothesisCoreDataSource::new(self)) }, } } def count_failing_examples(&self) -> usize { self.interesting_examples.len() } def failing_example(&mut self, i: usize) -> HypothesisCoreDataSource { self.pending = Some( DataSource::from_vec(self.interesting_examples[i].record.clone()) ); HypothesisCoreDataSource::new(self) } def was_unsatisfiable(&mut self) -> bool { self.engine.was_unsatisfiable() } def finish_overflow(&mut self, child: &mut HypothesisCoreDataSource){ mark_child_status(&mut self.engine, child, Status::Overflow); } def finish_invalid(&mut self, child: &mut HypothesisCoreDataSource){ mark_child_status(&mut self.engine, child, Status::Invalid); } def finish_interesting(&mut self, child: &mut HypothesisCoreDataSource, label: u64){ mark_child_status(&mut self.engine, child, 
Status::Interesting(label)); } def finish_valid(&mut self, child: &mut HypothesisCoreDataSource){ mark_child_status(&mut self.engine, child, Status::Valid); } } class HypothesisCoreBitPossible{ struct { n_bits: u64, } def initialize(helix, n_bits: u64){ return HypothesisCoreBitPossible{helix, n_bits: n_bits}; } def provide(&mut self, data: &mut HypothesisCoreDataSource) -> Option{ match &mut data.source { &mut None => None, &mut Some(ref mut source) => source.bits(self.n_bits).ok(), } } } class HypothesisCoreRepeatValues{ struct { repeat: Repeat, } def initialize(helix, min_count: u64, max_count: u64, expected_count: f64){ return HypothesisCoreRepeatValues{ helix, repeat: Repeat::new(min_count, max_count, expected_count) } } def _should_continue(&mut self, data: &mut HypothesisCoreDataSource) -> Option{ return data.source.as_mut().and_then(|ref mut source| { self.repeat.should_continue(source).ok() }) } def reject(&mut self){ self.repeat.reject(); } } class HypothesisCoreIntegers{ struct { bitlengths: distributions::Sampler, } def initialize(helix){ return HypothesisCoreIntegers{helix,bitlengths: distributions::good_bitlengths()}; } def provide(&mut self, data: &mut HypothesisCoreDataSource) -> Option{ data.source.as_mut().and_then(|ref mut source| { distributions::integer_from_bitlengths(source, &self.bitlengths).ok() }) } } class HypothesisCoreBoundedIntegers{ struct { max_value: u64, } def initialize(helix, max_value: u64){ return HypothesisCoreBoundedIntegers{helix, max_value: max_value}; } def provide(&mut self, data: &mut HypothesisCoreDataSource) -> Option{ data.source.as_mut().and_then(|ref mut source| { distributions::bounded_int(source, self.max_value).ok() }) } } } fn mark_child_status(engine: &mut Engine, child: &mut HypothesisCoreDataSource, status: Status) { let mut replacement = None; mem::swap(&mut replacement, &mut child.source); match replacement { Some(source) => engine.mark_finished(source, status), None => (), } } 
hypothesis-hypothesis-python-4.36.2/mypy.ini000066400000000000000000000004471354103617500212320ustar00rootroot00000000000000[mypy] python_version = 3.6 platform = linux strict_optional = True disallow_untyped_decorators = True follow_imports = silent ignore_missing_imports = True warn_unused_ignores = True warn_unused_configs = True warn_redundant_casts = True [mypy-hypothesis.internal.*] ignore_errors = True hypothesis-hypothesis-python-4.36.2/notebooks/000077500000000000000000000000001354103617500215315ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/notebooks/Designing a better simplifier.ipynb000066400000000000000000004711231354103617500303060ustar00rootroot00000000000000{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Designing a better simplifier\n", "\n", "This is a notebook talking through some of the considerations in the design of Hypothesis's approach to simplification.\n", "\n", "It doesn't perfectly mirror what actually happens in Hypothesis, but it should give some consideration to the sort of things that Hypothesis does and why it takes a particular approach.\n", "\n", "In order to simplify the scope of this document we are only going to\n", "concern ourselves with lists of integers. There are a number of API considerations involved in expanding beyond that point, however most of the algorithmic considerations are the same.\n", "\n", "The big difference between lists of integers and the general case is that integers can never be too complex. In particular we will rapidly get to the point where individual elements can be simplified in usually only log(n) calls. When dealing with e.g. lists of lists this is a much more complicated proposition. That may be covered in another notebook.\n", "\n", "Our objective here is to minimize the number of times we check the condition. 
We won't be looking at actual timing performance, because usually the speed of the condition is the bottleneck there (and where it's not, everything is fast enough that we need not worry)." ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [], "source": [ "def greedy_shrink(ls, constraint, shrink):\n", " \"\"\"\n", " This is the \"classic\" QuickCheck algorithm which takes a shrink function\n", " which will iterate over simpler versions of an example. We are trying\n", " to find a local minima: That is an example ls such that condition(ls)\n", " is True but that constraint(t) is False for each t in shrink(ls).\n", " \"\"\"\n", " while True:\n", " for s in shrink(ls):\n", " if constraint(s):\n", " ls = s\n", " break\n", " else:\n", " return ls" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def shrink1(ls):\n", " \"\"\"\n", " This is our prototype shrink function. It is very bad. It makes the\n", " mistake of only making very small changes to an example each time.\n", " \n", " Most people write something like this the first time they come to\n", " implement example shrinking. In particular early Hypothesis very much\n", " made this mistake.\n", " \n", " What this does:\n", " \n", " For each index, if the value of the index is non-zero we try\n", " decrementing it by 1.\n", " \n", " We then (regardless of if it's zero) try the list with the value at\n", " that index deleted.\n", " \"\"\"\n", " for i in range(len(ls)):\n", " s = list(ls)\n", " if s[i] > 0:\n", " s[i] -= 1\n", " yield list(s)\n", " del s[i]\n", " yield list(s)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [], "source": [ "def show_trace(start, constraint, simplifier):\n", " \"\"\"\n", " This is a debug function. 
You shouldn't concern yourself with\n", " its implementation too much.\n", " \n", " What it does is print out every intermediate step in applying a\n", " simplifier (a function of the form (list, constraint) -> list)\n", " along with whether it is a successful shrink or not.\n", " \"\"\"\n", " if start is None:\n", " while True:\n", " start = gen_list()\n", " if constraint(start):\n", " break\n", "\n", " shrinks = [0]\n", " tests = [0]\n", "\n", " def print_shrink(ls):\n", " tests[0] += 1\n", " if constraint(ls):\n", " shrinks[0] += 1\n", " print(\"✓\", ls)\n", " return True\n", " else:\n", " print(\"✗\", ls)\n", " return False\n", " print(\"✓\", start)\n", " simplifier(start, print_shrink)\n", " print()\n", " print(\"%d shrinks with %d function calls\" % (\n", " shrinks[0], tests[0]))" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": false }, "outputs": [], "source": [ "from functools import partial" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [5, 5]\n", "✓ [4, 5]\n", "✓ [3, 5]\n", "✓ [2, 5]\n", "✓ [1, 5]\n", "✓ [0, 5]\n", "✗ [5]\n", "✓ [0, 4]\n", "✗ [4]\n", "✓ [0, 3]\n", "✗ [3]\n", "✓ [0, 2]\n", "✗ [2]\n", "✓ [0, 1]\n", "✗ [1]\n", "✓ [0, 0]\n", "✗ [0]\n", "✗ [0]\n", "\n", "10 shrinks with 17 function calls\n" ] } ], "source": [ "show_trace([5, 5], lambda x: len(x) >= 2, partial(greedy_shrink, shrink=shrink1))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "That worked reasonably well, but it sure was a lot of function calls for such a small amount of shrinking. What would have happened if we'd started with [100, 100]?" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def shrink2(ls):\n", " \"\"\"\n", " Here is an improved shrink function. 
We first try deleting each element\n", " and then we try making each element smaller, but we do so from the left\n", " hand side instead of the right. This means we will always find the\n", " smallest value that can go in there, but we will do so much sooner.\n", " \"\"\"\n", " for i in range(len(ls)):\n", " s = list(ls)\n", " del s[i]\n", " yield list(s)\n", " \n", " for i in range(len(ls)):\n", " for x in range(ls[i]):\n", " s = list(ls)\n", " s[i] = x\n", " yield s" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [5, 5]\n", "✗ [5]\n", "✗ [5]\n", "✓ [0, 5]\n", "✗ [5]\n", "✗ [0]\n", "✓ [0, 0]\n", "✗ [0]\n", "✗ [0]\n", "\n", "2 shrinks with 8 function calls\n" ] } ], "source": [ "show_trace([5, 5], lambda x: len(x) >= 2, partial(\n", " greedy_shrink, shrink=shrink2))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This did indeed reduce the number of function calls significantly - we immediately determine that the value in the cell doesn't matter and we can just put zero there. \n", "\n", "But what would have happened if the value *did* matter?" 
] }, { "cell_type": "code", "execution_count": 8, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [1000]\n", "✗ []\n", "✗ [0]\n", "✗ [1]\n", "✗ [2]\n", "✗ [3]\n", "✗ [4]\n", "✗ [5]\n", "✗ [6]\n", "✗ [7]\n", "✗ [8]\n", "✗ [9]\n", "✗ [10]\n", "✗ [11]\n", "✗ [12]\n", "✗ [13]\n", "✗ [14]\n", "✗ [15]\n", "✗ [16]\n", "✗ [17]\n", "✗ [18]\n", "✗ [19]\n", "✗ [20]\n", "✗ [21]\n", "✗ [22]\n", "✗ [23]\n", "✗ [24]\n", "✗ [25]\n", "✗ [26]\n", "✗ [27]\n", "✗ [28]\n", "✗ [29]\n", "✗ [30]\n", "✗ [31]\n", "✗ [32]\n", "✗ [33]\n", "✗ [34]\n", "✗ [35]\n", "✗ [36]\n", "✗ [37]\n", "✗ [38]\n", "✗ [39]\n", "✗ [40]\n", "✗ [41]\n", "✗ [42]\n", "✗ [43]\n", "✗ [44]\n", "✗ [45]\n", "✗ [46]\n", "✗ [47]\n", "✗ [48]\n", "✗ [49]\n", "✗ [50]\n", "✗ [51]\n", "✗ [52]\n", "✗ [53]\n", "✗ [54]\n", "✗ [55]\n", "✗ [56]\n", "✗ [57]\n", "✗ [58]\n", "✗ [59]\n", "✗ [60]\n", "✗ [61]\n", "✗ [62]\n", "✗ [63]\n", "✗ [64]\n", "✗ [65]\n", "✗ [66]\n", "✗ [67]\n", "✗ [68]\n", "✗ [69]\n", "✗ [70]\n", "✗ [71]\n", "✗ [72]\n", "✗ [73]\n", "✗ [74]\n", "✗ [75]\n", "✗ [76]\n", "✗ [77]\n", "✗ [78]\n", "✗ [79]\n", "✗ [80]\n", "✗ [81]\n", "✗ [82]\n", "✗ [83]\n", "✗ [84]\n", "✗ [85]\n", "✗ [86]\n", "✗ [87]\n", "✗ [88]\n", "✗ [89]\n", "✗ [90]\n", "✗ [91]\n", "✗ [92]\n", "✗ [93]\n", "✗ [94]\n", "✗ [95]\n", "✗ [96]\n", "✗ [97]\n", "✗ [98]\n", "✗ [99]\n", "✗ [100]\n", "✗ [101]\n", "✗ [102]\n", "✗ [103]\n", "✗ [104]\n", "✗ [105]\n", "✗ [106]\n", "✗ [107]\n", "✗ [108]\n", "✗ [109]\n", "✗ [110]\n", "✗ [111]\n", "✗ [112]\n", "✗ [113]\n", "✗ [114]\n", "✗ [115]\n", "✗ [116]\n", "✗ [117]\n", "✗ [118]\n", "✗ [119]\n", "✗ [120]\n", "✗ [121]\n", "✗ [122]\n", "✗ [123]\n", "✗ [124]\n", "✗ [125]\n", "✗ [126]\n", "✗ [127]\n", "✗ [128]\n", "✗ [129]\n", "✗ [130]\n", "✗ [131]\n", "✗ [132]\n", "✗ [133]\n", "✗ [134]\n", "✗ [135]\n", "✗ [136]\n", "✗ [137]\n", "✗ [138]\n", "✗ [139]\n", "✗ [140]\n", "✗ [141]\n", "✗ [142]\n", "✗ [143]\n", "✗ [144]\n", "✗ [145]\n", "✗ [146]\n", "✗ [147]\n", "✗ 
[148]\n", "✗ [149]\n", "✗ [150]\n", "✗ [151]\n", "✗ [152]\n", "✗ [153]\n", "✗ [154]\n", "✗ [155]\n", "✗ [156]\n", "✗ [157]\n", "✗ [158]\n", "✗ [159]\n", "✗ [160]\n", "✗ [161]\n", "✗ [162]\n", "✗ [163]\n", "✗ [164]\n", "✗ [165]\n", "✗ [166]\n", "✗ [167]\n", "✗ [168]\n", "✗ [169]\n", "✗ [170]\n", "✗ [171]\n", "✗ [172]\n", "✗ [173]\n", "✗ [174]\n", "✗ [175]\n", "✗ [176]\n", "✗ [177]\n", "✗ [178]\n", "✗ [179]\n", "✗ [180]\n", "✗ [181]\n", "✗ [182]\n", "✗ [183]\n", "✗ [184]\n", "✗ [185]\n", "✗ [186]\n", "✗ [187]\n", "✗ [188]\n", "✗ [189]\n", "✗ [190]\n", "✗ [191]\n", "✗ [192]\n", "✗ [193]\n", "✗ [194]\n", "✗ [195]\n", "✗ [196]\n", "✗ [197]\n", "✗ [198]\n", "✗ [199]\n", "✗ [200]\n", "✗ [201]\n", "✗ [202]\n", "✗ [203]\n", "✗ [204]\n", "✗ [205]\n", "✗ [206]\n", "✗ [207]\n", "✗ [208]\n", "✗ [209]\n", "✗ [210]\n", "✗ [211]\n", "✗ [212]\n", "✗ [213]\n", "✗ [214]\n", "✗ [215]\n", "✗ [216]\n", "✗ [217]\n", "✗ [218]\n", "✗ [219]\n", "✗ [220]\n", "✗ [221]\n", "✗ [222]\n", "✗ [223]\n", "✗ [224]\n", "✗ [225]\n", "✗ [226]\n", "✗ [227]\n", "✗ [228]\n", "✗ [229]\n", "✗ [230]\n", "✗ [231]\n", "✗ [232]\n", "✗ [233]\n", "✗ [234]\n", "✗ [235]\n", "✗ [236]\n", "✗ [237]\n", "✗ [238]\n", "✗ [239]\n", "✗ [240]\n", "✗ [241]\n", "✗ [242]\n", "✗ [243]\n", "✗ [244]\n", "✗ [245]\n", "✗ [246]\n", "✗ [247]\n", "✗ [248]\n", "✗ [249]\n", "✗ [250]\n", "✗ [251]\n", "✗ [252]\n", "✗ [253]\n", "✗ [254]\n", "✗ [255]\n", "✗ [256]\n", "✗ [257]\n", "✗ [258]\n", "✗ [259]\n", "✗ [260]\n", "✗ [261]\n", "✗ [262]\n", "✗ [263]\n", "✗ [264]\n", "✗ [265]\n", "✗ [266]\n", "✗ [267]\n", "✗ [268]\n", "✗ [269]\n", "✗ [270]\n", "✗ [271]\n", "✗ [272]\n", "✗ [273]\n", "✗ [274]\n", "✗ [275]\n", "✗ [276]\n", "✗ [277]\n", "✗ [278]\n", "✗ [279]\n", "✗ [280]\n", "✗ [281]\n", "✗ [282]\n", "✗ [283]\n", "✗ [284]\n", "✗ [285]\n", "✗ [286]\n", "✗ [287]\n", "✗ [288]\n", "✗ [289]\n", "✗ [290]\n", "✗ [291]\n", "✗ [292]\n", "✗ [293]\n", "✗ [294]\n", "✗ [295]\n", "✗ [296]\n", "✗ [297]\n", "✗ [298]\n", "✗ [299]\n", "✗ [300]\n", "✗ [301]\n", 
"✗ [302]\n", "✗ [303]\n", "✗ [304]\n", "✗ [305]\n", "✗ [306]\n", "✗ [307]\n", "✗ [308]\n", "✗ [309]\n", "✗ [310]\n", "✗ [311]\n", "✗ [312]\n", "✗ [313]\n", "✗ [314]\n", "✗ [315]\n", "✗ [316]\n", "✗ [317]\n", "✗ [318]\n", "✗ [319]\n", "✗ [320]\n", "✗ [321]\n", "✗ [322]\n", "✗ [323]\n", "✗ [324]\n", "✗ [325]\n", "✗ [326]\n", "✗ [327]\n", "✗ [328]\n", "✗ [329]\n", "✗ [330]\n", "✗ [331]\n", "✗ [332]\n", "✗ [333]\n", "✗ [334]\n", "✗ [335]\n", "✗ [336]\n", "✗ [337]\n", "✗ [338]\n", "✗ [339]\n", "✗ [340]\n", "✗ [341]\n", "✗ [342]\n", "✗ [343]\n", "✗ [344]\n", "✗ [345]\n", "✗ [346]\n", "✗ [347]\n", "✗ [348]\n", "✗ [349]\n", "✗ [350]\n", "✗ [351]\n", "✗ [352]\n", "✗ [353]\n", "✗ [354]\n", "✗ [355]\n", "✗ [356]\n", "✗ [357]\n", "✗ [358]\n", "✗ [359]\n", "✗ [360]\n", "✗ [361]\n", "✗ [362]\n", "✗ [363]\n", "✗ [364]\n", "✗ [365]\n", "✗ [366]\n", "✗ [367]\n", "✗ [368]\n", "✗ [369]\n", "✗ [370]\n", "✗ [371]\n", "✗ [372]\n", "✗ [373]\n", "✗ [374]\n", "✗ [375]\n", "✗ [376]\n", "✗ [377]\n", "✗ [378]\n", "✗ [379]\n", "✗ [380]\n", "✗ [381]\n", "✗ [382]\n", "✗ [383]\n", "✗ [384]\n", "✗ [385]\n", "✗ [386]\n", "✗ [387]\n", "✗ [388]\n", "✗ [389]\n", "✗ [390]\n", "✗ [391]\n", "✗ [392]\n", "✗ [393]\n", "✗ [394]\n", "✗ [395]\n", "✗ [396]\n", "✗ [397]\n", "✗ [398]\n", "✗ [399]\n", "✗ [400]\n", "✗ [401]\n", "✗ [402]\n", "✗ [403]\n", "✗ [404]\n", "✗ [405]\n", "✗ [406]\n", "✗ [407]\n", "✗ [408]\n", "✗ [409]\n", "✗ [410]\n", "✗ [411]\n", "✗ [412]\n", "✗ [413]\n", "✗ [414]\n", "✗ [415]\n", "✗ [416]\n", "✗ [417]\n", "✗ [418]\n", "✗ [419]\n", "✗ [420]\n", "✗ [421]\n", "✗ [422]\n", "✗ [423]\n", "✗ [424]\n", "✗ [425]\n", "✗ [426]\n", "✗ [427]\n", "✗ [428]\n", "✗ [429]\n", "✗ [430]\n", "✗ [431]\n", "✗ [432]\n", "✗ [433]\n", "✗ [434]\n", "✗ [435]\n", "✗ [436]\n", "✗ [437]\n", "✗ [438]\n", "✗ [439]\n", "✗ [440]\n", "✗ [441]\n", "✗ [442]\n", "✗ [443]\n", "✗ [444]\n", "✗ [445]\n", "✗ [446]\n", "✗ [447]\n", "✗ [448]\n", "✗ [449]\n", "✗ [450]\n", "✗ [451]\n", "✗ [452]\n", "✗ [453]\n", "✗ [454]\n", "✗ 
[455]\n", "✗ [456]\n", "✗ [457]\n", "✗ [458]\n", "✗ [459]\n", "✗ [460]\n", "✗ [461]\n", "✗ [462]\n", "✗ [463]\n", "✗ [464]\n", "✗ [465]\n", "✗ [466]\n", "✗ [467]\n", "✗ [468]\n", "✗ [469]\n", "✗ [470]\n", "✗ [471]\n", "✗ [472]\n", "✗ [473]\n", "✗ [474]\n", "✗ [475]\n", "✗ [476]\n", "✗ [477]\n", "✗ [478]\n", "✗ [479]\n", "✗ [480]\n", "✗ [481]\n", "✗ [482]\n", "✗ [483]\n", "✗ [484]\n", "✗ [485]\n", "✗ [486]\n", "✗ [487]\n", "✗ [488]\n", "✗ [489]\n", "✗ [490]\n", "✗ [491]\n", "✗ [492]\n", "✗ [493]\n", "✗ [494]\n", "✗ [495]\n", "✗ [496]\n", "✗ [497]\n", "✗ [498]\n", "✗ [499]\n", "✓ [500]\n", "✗ []\n", "✗ [0]\n", "✗ [1]\n", "✗ [2]\n", "✗ [3]\n", "✗ [4]\n", "✗ [5]\n", "✗ [6]\n", "✗ [7]\n", "✗ [8]\n", "✗ [9]\n", "✗ [10]\n", "✗ [11]\n", "✗ [12]\n", "✗ [13]\n", "✗ [14]\n", "✗ [15]\n", "✗ [16]\n", "✗ [17]\n", "✗ [18]\n", "✗ [19]\n", "✗ [20]\n", "✗ [21]\n", "✗ [22]\n", "✗ [23]\n", "✗ [24]\n", "✗ [25]\n", "✗ [26]\n", "✗ [27]\n", "✗ [28]\n", "✗ [29]\n", "✗ [30]\n", "✗ [31]\n", "✗ [32]\n", "✗ [33]\n", "✗ [34]\n", "✗ [35]\n", "✗ [36]\n", "✗ [37]\n", "✗ [38]\n", "✗ [39]\n", "✗ [40]\n", "✗ [41]\n", "✗ [42]\n", "✗ [43]\n", "✗ [44]\n", "✗ [45]\n", "✗ [46]\n", "✗ [47]\n", "✗ [48]\n", "✗ [49]\n", "✗ [50]\n", "✗ [51]\n", "✗ [52]\n", "✗ [53]\n", "✗ [54]\n", "✗ [55]\n", "✗ [56]\n", "✗ [57]\n", "✗ [58]\n", "✗ [59]\n", "✗ [60]\n", "✗ [61]\n", "✗ [62]\n", "✗ [63]\n", "✗ [64]\n", "✗ [65]\n", "✗ [66]\n", "✗ [67]\n", "✗ [68]\n", "✗ [69]\n", "✗ [70]\n", "✗ [71]\n", "✗ [72]\n", "✗ [73]\n", "✗ [74]\n", "✗ [75]\n", "✗ [76]\n", "✗ [77]\n", "✗ [78]\n", "✗ [79]\n", "✗ [80]\n", "✗ [81]\n", "✗ [82]\n", "✗ [83]\n", "✗ [84]\n", "✗ [85]\n", "✗ [86]\n", "✗ [87]\n", "✗ [88]\n", "✗ [89]\n", "✗ [90]\n", "✗ [91]\n", "✗ [92]\n", "✗ [93]\n", "✗ [94]\n", "✗ [95]\n", "✗ [96]\n", "✗ [97]\n", "✗ [98]\n", "✗ [99]\n", "✗ [100]\n", "✗ [101]\n", "✗ [102]\n", "✗ [103]\n", "✗ [104]\n", "✗ [105]\n", "✗ [106]\n", "✗ [107]\n", "✗ [108]\n", "✗ [109]\n", "✗ [110]\n", "✗ [111]\n", "✗ [112]\n", "✗ [113]\n", "✗ [114]\n", "✗ 
[115]\n", "✗ [116]\n", "✗ [117]\n", "✗ [118]\n", "✗ [119]\n", "✗ [120]\n", "✗ [121]\n", "✗ [122]\n", "✗ [123]\n", "✗ [124]\n", "✗ [125]\n", "✗ [126]\n", "✗ [127]\n", "✗ [128]\n", "✗ [129]\n", "✗ [130]\n", "✗ [131]\n", "✗ [132]\n", "✗ [133]\n", "✗ [134]\n", "✗ [135]\n", "✗ [136]\n", "✗ [137]\n", "✗ [138]\n", "✗ [139]\n", "✗ [140]\n", "✗ [141]\n", "✗ [142]\n", "✗ [143]\n", "✗ [144]\n", "✗ [145]\n", "✗ [146]\n", "✗ [147]\n", "✗ [148]\n", "✗ [149]\n", "✗ [150]\n", "✗ [151]\n", "✗ [152]\n", "✗ [153]\n", "✗ [154]\n", "✗ [155]\n", "✗ [156]\n", "✗ [157]\n", "✗ [158]\n", "✗ [159]\n", "✗ [160]\n", "✗ [161]\n", "✗ [162]\n", "✗ [163]\n", "✗ [164]\n", "✗ [165]\n", "✗ [166]\n", "✗ [167]\n", "✗ [168]\n", "✗ [169]\n", "✗ [170]\n", "✗ [171]\n", "✗ [172]\n", "✗ [173]\n", "✗ [174]\n", "✗ [175]\n", "✗ [176]\n", "✗ [177]\n", "✗ [178]\n", "✗ [179]\n", "✗ [180]\n", "✗ [181]\n", "✗ [182]\n", "✗ [183]\n", "✗ [184]\n", "✗ [185]\n", "✗ [186]\n", "✗ [187]\n", "✗ [188]\n", "✗ [189]\n", "✗ [190]\n", "✗ [191]\n", "✗ [192]\n", "✗ [193]\n", "✗ [194]\n", "✗ [195]\n", "✗ [196]\n", "✗ [197]\n", "✗ [198]\n", "✗ [199]\n", "✗ [200]\n", "✗ [201]\n", "✗ [202]\n", "✗ [203]\n", "✗ [204]\n", "✗ [205]\n", "✗ [206]\n", "✗ [207]\n", "✗ [208]\n", "✗ [209]\n", "✗ [210]\n", "✗ [211]\n", "✗ [212]\n", "✗ [213]\n", "✗ [214]\n", "✗ [215]\n", "✗ [216]\n", "✗ [217]\n", "✗ [218]\n", "✗ [219]\n", "✗ [220]\n", "✗ [221]\n", "✗ [222]\n", "✗ [223]\n", "✗ [224]\n", "✗ [225]\n", "✗ [226]\n", "✗ [227]\n", "✗ [228]\n", "✗ [229]\n", "✗ [230]\n", "✗ [231]\n", "✗ [232]\n", "✗ [233]\n", "✗ [234]\n", "✗ [235]\n", "✗ [236]\n", "✗ [237]\n", "✗ [238]\n", "✗ [239]\n", "✗ [240]\n", "✗ [241]\n", "✗ [242]\n", "✗ [243]\n", "✗ [244]\n", "✗ [245]\n", "✗ [246]\n", "✗ [247]\n", "✗ [248]\n", "✗ [249]\n", "✗ [250]\n", "✗ [251]\n", "✗ [252]\n", "✗ [253]\n", "✗ [254]\n", "✗ [255]\n", "✗ [256]\n", "✗ [257]\n", "✗ [258]\n", "✗ [259]\n", "✗ [260]\n", "✗ [261]\n", "✗ [262]\n", "✗ [263]\n", "✗ [264]\n", "✗ [265]\n", "✗ [266]\n", "✗ [267]\n", "✗ [268]\n", 
"✗ [269]\n", "✗ [270]\n", "✗ [271]\n", "✗ [272]\n", "✗ [273]\n", "✗ [274]\n", "✗ [275]\n", "✗ [276]\n", "✗ [277]\n", "✗ [278]\n", "✗ [279]\n", "✗ [280]\n", "✗ [281]\n", "✗ [282]\n", "✗ [283]\n", "✗ [284]\n", "✗ [285]\n", "✗ [286]\n", "✗ [287]\n", "✗ [288]\n", "✗ [289]\n", "✗ [290]\n", "✗ [291]\n", "✗ [292]\n", "✗ [293]\n", "✗ [294]\n", "✗ [295]\n", "✗ [296]\n", "✗ [297]\n", "✗ [298]\n", "✗ [299]\n", "✗ [300]\n", "✗ [301]\n", "✗ [302]\n", "✗ [303]\n", "✗ [304]\n", "✗ [305]\n", "✗ [306]\n", "✗ [307]\n", "✗ [308]\n", "✗ [309]\n", "✗ [310]\n", "✗ [311]\n", "✗ [312]\n", "✗ [313]\n", "✗ [314]\n", "✗ [315]\n", "✗ [316]\n", "✗ [317]\n", "✗ [318]\n", "✗ [319]\n", "✗ [320]\n", "✗ [321]\n", "✗ [322]\n", "✗ [323]\n", "✗ [324]\n", "✗ [325]\n", "✗ [326]\n", "✗ [327]\n", "✗ [328]\n", "✗ [329]\n", "✗ [330]\n", "✗ [331]\n", "✗ [332]\n", "✗ [333]\n", "✗ [334]\n", "✗ [335]\n", "✗ [336]\n", "✗ [337]\n", "✗ [338]\n", "✗ [339]\n", "✗ [340]\n", "✗ [341]\n", "✗ [342]\n", "✗ [343]\n", "✗ [344]\n", "✗ [345]\n", "✗ [346]\n", "✗ [347]\n", "✗ [348]\n", "✗ [349]\n", "✗ [350]\n", "✗ [351]\n", "✗ [352]\n", "✗ [353]\n", "✗ [354]\n", "✗ [355]\n", "✗ [356]\n", "✗ [357]\n", "✗ [358]\n", "✗ [359]\n", "✗ [360]\n", "✗ [361]\n", "✗ [362]\n", "✗ [363]\n", "✗ [364]\n", "✗ [365]\n", "✗ [366]\n", "✗ [367]\n", "✗ [368]\n", "✗ [369]\n", "✗ [370]\n", "✗ [371]\n", "✗ [372]\n", "✗ [373]\n", "✗ [374]\n", "✗ [375]\n", "✗ [376]\n", "✗ [377]\n", "✗ [378]\n", "✗ [379]\n", "✗ [380]\n", "✗ [381]\n", "✗ [382]\n", "✗ [383]\n", "✗ [384]\n", "✗ [385]\n", "✗ [386]\n", "✗ [387]\n", "✗ [388]\n", "✗ [389]\n", "✗ [390]\n", "✗ [391]\n", "✗ [392]\n", "✗ [393]\n", "✗ [394]\n", "✗ [395]\n", "✗ [396]\n", "✗ [397]\n", "✗ [398]\n", "✗ [399]\n", "✗ [400]\n", "✗ [401]\n", "✗ [402]\n", "✗ [403]\n", "✗ [404]\n", "✗ [405]\n", "✗ [406]\n", "✗ [407]\n", "✗ [408]\n", "✗ [409]\n", "✗ [410]\n", "✗ [411]\n", "✗ [412]\n", "✗ [413]\n", "✗ [414]\n", "✗ [415]\n", "✗ [416]\n", "✗ [417]\n", "✗ [418]\n", "✗ [419]\n", "✗ [420]\n", "✗ [421]\n", "✗ 
[422]\n", "✗ [423]\n", "✗ [424]\n", "✗ [425]\n", "✗ [426]\n", "✗ [427]\n", "✗ [428]\n", "✗ [429]\n", "✗ [430]\n", "✗ [431]\n", "✗ [432]\n", "✗ [433]\n", "✗ [434]\n", "✗ [435]\n", "✗ [436]\n", "✗ [437]\n", "✗ [438]\n", "✗ [439]\n", "✗ [440]\n", "✗ [441]\n", "✗ [442]\n", "✗ [443]\n", "✗ [444]\n", "✗ [445]\n", "✗ [446]\n", "✗ [447]\n", "✗ [448]\n", "✗ [449]\n", "✗ [450]\n", "✗ [451]\n", "✗ [452]\n", "✗ [453]\n", "✗ [454]\n", "✗ [455]\n", "✗ [456]\n", "✗ [457]\n", "✗ [458]\n", "✗ [459]\n", "✗ [460]\n", "✗ [461]\n", "✗ [462]\n", "✗ [463]\n", "✗ [464]\n", "✗ [465]\n", "✗ [466]\n", "✗ [467]\n", "✗ [468]\n", "✗ [469]\n", "✗ [470]\n", "✗ [471]\n", "✗ [472]\n", "✗ [473]\n", "✗ [474]\n", "✗ [475]\n", "✗ [476]\n", "✗ [477]\n", "✗ [478]\n", "✗ [479]\n", "✗ [480]\n", "✗ [481]\n", "✗ [482]\n", "✗ [483]\n", "✗ [484]\n", "✗ [485]\n", "✗ [486]\n", "✗ [487]\n", "✗ [488]\n", "✗ [489]\n", "✗ [490]\n", "✗ [491]\n", "✗ [492]\n", "✗ [493]\n", "✗ [494]\n", "✗ [495]\n", "✗ [496]\n", "✗ [497]\n", "✗ [498]\n", "✗ [499]\n", "\n", "1 shrinks with 1003 function calls\n" ] } ], "source": [ "show_trace([1000], lambda x: sum(x) >= 500,\n", " partial(greedy_shrink, shrink=shrink2))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Because we're trying every intermediate value, what we have amounts to a linear probe up to the smallest value that will work. If that smallest value is large, this will take a long time. Our shrinking is still O(n), but n is now the size of the smallest value that will work rather than the starting value. This is still pretty suboptimal.\n", "\n", "What we want to do is try to replace our linear probe with a binary search. What we'll get isn't exactly a binary search, but it's close enough." 
] }, { "cell_type": "code", "execution_count": 9, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def shrink_integer(n):\n", " \"\"\"\n", " Shrinker for individual integers.\n", " \n", " What happens is that we start from the left, first probing upwards in powers of two.\n", " \n", " When this would take us past our target value we then binary chop towards it.\n", " \"\"\"\n", " if not n:\n", " return\n", " for k in range(64):\n", " probe = 2 ** k\n", " if probe >= n:\n", " break\n", " yield probe - 1\n", " probe //= 2\n", " while True:\n", " probe = (probe + n) // 2\n", " yield probe\n", " if probe == n - 1:\n", " break\n", "\n", "\n", "def shrink3(ls):\n", " for i in range(len(ls)):\n", " s = list(ls)\n", " del s[i]\n", " yield list(s)\n", " for x in shrink_integer(ls[i]):\n", " s = list(ls)\n", " s[i] = x\n", " yield s" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "[0, 1, 3, 7, 15, 31, 63, 127, 255, 378, 439, 469, 484, 492, 496, 498, 499]" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "list(shrink_integer(500))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This gives us a reasonable distribution of O(log(n)) values in the middle while still making sure we start with 0 and finish with n - 1.\n", "\n", "In Hypothesis's actual implementation we also try random values in the probe region in case there's something special about things near powers of two, but we won't worry about that here." 
] }, { "cell_type": "code", "execution_count": 11, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [1000]\n", "✗ []\n", "✗ [0]\n", "✗ [1]\n", "✗ [3]\n", "✗ [7]\n", "✗ [15]\n", "✗ [31]\n", "✗ [63]\n", "✗ [127]\n", "✗ [255]\n", "✓ [511]\n", "✗ []\n", "✗ [0]\n", "✗ [1]\n", "✗ [3]\n", "✗ [7]\n", "✗ [15]\n", "✗ [31]\n", "✗ [63]\n", "✗ [127]\n", "✗ [255]\n", "✗ [383]\n", "✗ [447]\n", "✗ [479]\n", "✗ [495]\n", "✓ [503]\n", "✗ []\n", "✗ [0]\n", "✗ [1]\n", "✗ [3]\n", "✗ [7]\n", "✗ [15]\n", "✗ [31]\n", "✗ [63]\n", "✗ [127]\n", "✗ [255]\n", "✗ [379]\n", "✗ [441]\n", "✗ [472]\n", "✗ [487]\n", "✗ [495]\n", "✗ [499]\n", "✓ [501]\n", "✗ []\n", "✗ [0]\n", "✗ [1]\n", "✗ [3]\n", "✗ [7]\n", "✗ [15]\n", "✗ [31]\n", "✗ [63]\n", "✗ [127]\n", "✗ [255]\n", "✗ [378]\n", "✗ [439]\n", "✗ [470]\n", "✗ [485]\n", "✗ [493]\n", "✗ [497]\n", "✗ [499]\n", "✓ [500]\n", "✗ []\n", "✗ [0]\n", "✗ [1]\n", "✗ [3]\n", "✗ [7]\n", "✗ [15]\n", "✗ [31]\n", "✗ [63]\n", "✗ [127]\n", "✗ [255]\n", "✗ [378]\n", "✗ [439]\n", "✗ [469]\n", "✗ [484]\n", "✗ [492]\n", "✗ [496]\n", "✗ [498]\n", "✗ [499]\n", "\n", "4 shrinks with 79 function calls\n" ] } ], "source": [ "show_trace([1000], lambda x: sum(x) >= 500, partial(\n", " greedy_shrink, shrink=shrink3))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This now runs in a much more reasonable number of function calls.\n", "\n", "Now we want to look at how to reduce the number of elements in the list more efficiently. We're currently making the same mistake we did with n umbers. Only reducing one at a time." 
] }, { "cell_type": "code", "execution_count": 12, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2, 2]\n", "✓ [2, 2, 2, 2]\n", "✓ [2, 2, 2]\n", "✓ [2, 2]\n", "✗ [2]\n", "✗ [0, 2]\n", "✓ [1, 2]\n", "✗ [2]\n", "✗ [0, 2]\n", "✗ [1]\n", "✗ [1, 0]\n", "✗ [1, 1]\n", "\n", "19 shrinks with 26 function calls\n" ] } ], "source": [ "show_trace([2] * 20, lambda x: sum(x) >= 3, partial(\n", " greedy_shrink, shrink=shrink3))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We won't try too hard here, because typically our lists are not *that* long. 
We will just attempt to start by finding a shortish initial prefix that demonstrates the behaviour:" ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def shrink_to_prefix(ls):\n", " i = 1\n", " while i < len(ls):\n", " yield ls[:i]\n", " i *= 2\n", "\n", "\n", "def delete_individual_elements(ls):\n", " for i in range(len(ls)):\n", " s = list(ls)\n", " del s[i]\n", " yield list(s)\n", "\n", "\n", "def shrink_individual_elements(ls):\n", " for i in range(len(ls)):\n", " for x in shrink_integer(ls[i]):\n", " s = list(ls)\n", " s[i] = x\n", " yield s\n", " \n", "def shrink4(ls):\n", " yield from shrink_to_prefix(ls)\n", " yield from delete_individual_elements(ls)\n", " yield from shrink_individual_elements(ls) " ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [2]\n", "✓ [2, 2]\n", "✗ [2]\n", "✗ [2]\n", "✗ [2]\n", "✗ [0, 2]\n", "✓ [1, 2]\n", "✗ [1]\n", "✗ [2]\n", "✗ [1]\n", "✗ [0, 2]\n", "✗ [1, 0]\n", "✗ [1, 1]\n", "\n", "2 shrinks with 13 function calls\n" ] } ], "source": [ "show_trace([2] * 20, lambda x: sum(x) >= 3, partial(\n", " greedy_shrink, shrink=shrink4))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The problem we now want to address is the fact that when we're shrinking elements we're only shrinking them one at a time. This means that even though we're only O(log(k)) in each element, we're O(log(k)^n) in the whole list where n is the length of the list. For even very modest k this is bad.\n", "\n", "In general we may not be able to fix this, but in practice for a lot of common structures we can exploit similarity to try to do simultaneous shrinking.\n", "\n", "Here is our starting example: We start and finish with all identical values. 
We would like to be able to shortcut through a lot of the uninteresting intermediate examples somehow." ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [20, 20, 20, 20, 20, 20, 20]\n", "✗ [20]\n", "✗ [20, 20]\n", "✗ [20, 20, 20, 20]\n", "✓ [20, 20, 20, 20, 20, 20]\n", "✗ [20]\n", "✗ [20, 20]\n", "✗ [20, 20, 20, 20]\n", "✓ [20, 20, 20, 20, 20]\n", "✗ [20]\n", "✗ [20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [0, 20, 20, 20, 20]\n", "✗ [1, 20, 20, 20, 20]\n", "✗ [3, 20, 20, 20, 20]\n", "✓ [7, 20, 20, 20, 20]\n", "✗ [7]\n", "✗ [7, 20]\n", "✗ [7, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [7, 20, 20, 20]\n", "✗ [7, 20, 20, 20]\n", "✗ [7, 20, 20, 20]\n", "✗ [7, 20, 20, 20]\n", "✗ [0, 20, 20, 20, 20]\n", "✗ [1, 20, 20, 20, 20]\n", "✗ [3, 20, 20, 20, 20]\n", "✓ [5, 20, 20, 20, 20]\n", "✗ [5]\n", "✗ [5, 20]\n", "✗ [5, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [5, 20, 20, 20]\n", "✗ [5, 20, 20, 20]\n", "✗ [5, 20, 20, 20]\n", "✗ [5, 20, 20, 20]\n", "✗ [0, 20, 20, 20, 20]\n", "✗ [1, 20, 20, 20, 20]\n", "✗ [3, 20, 20, 20, 20]\n", "✗ [4, 20, 20, 20, 20]\n", "✗ [5, 0, 20, 20, 20]\n", "✗ [5, 1, 20, 20, 20]\n", "✗ [5, 3, 20, 20, 20]\n", "✓ [5, 7, 20, 20, 20]\n", "✗ [5]\n", "✗ [5, 7]\n", "✗ [5, 7, 20, 20]\n", "✗ [7, 20, 20, 20]\n", "✗ [5, 20, 20, 20]\n", "✗ [5, 7, 20, 20]\n", "✗ [5, 7, 20, 20]\n", "✗ [5, 7, 20, 20]\n", "✗ [0, 7, 20, 20, 20]\n", "✗ [1, 7, 20, 20, 20]\n", "✗ [3, 7, 20, 20, 20]\n", "✗ [4, 7, 20, 20, 20]\n", "✗ [5, 0, 20, 20, 20]\n", "✗ [5, 1, 20, 20, 20]\n", "✗ [5, 3, 20, 20, 20]\n", "✓ [5, 5, 20, 20, 20]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 20, 20]\n", "✗ [5, 20, 20, 20]\n", "✗ [5, 20, 20, 20]\n", "✗ [5, 5, 20, 20]\n", "✗ [5, 5, 20, 20]\n", "✗ [5, 5, 20, 20]\n", "✗ [0, 5, 20, 20, 20]\n", "✗ [1, 5, 20, 20, 20]\n", "✗ [3, 5, 20, 20, 20]\n", "✗ [4, 
5, 20, 20, 20]\n", "✗ [5, 0, 20, 20, 20]\n", "✗ [5, 1, 20, 20, 20]\n", "✗ [5, 3, 20, 20, 20]\n", "✗ [5, 4, 20, 20, 20]\n", "✗ [5, 5, 0, 20, 20]\n", "✗ [5, 5, 1, 20, 20]\n", "✗ [5, 5, 3, 20, 20]\n", "✓ [5, 5, 7, 20, 20]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 7, 20]\n", "✗ [5, 7, 20, 20]\n", "✗ [5, 7, 20, 20]\n", "✗ [5, 5, 20, 20]\n", "✗ [5, 5, 7, 20]\n", "✗ [5, 5, 7, 20]\n", "✗ [0, 5, 7, 20, 20]\n", "✗ [1, 5, 7, 20, 20]\n", "✗ [3, 5, 7, 20, 20]\n", "✗ [4, 5, 7, 20, 20]\n", "✗ [5, 0, 7, 20, 20]\n", "✗ [5, 1, 7, 20, 20]\n", "✗ [5, 3, 7, 20, 20]\n", "✗ [5, 4, 7, 20, 20]\n", "✗ [5, 5, 0, 20, 20]\n", "✗ [5, 5, 1, 20, 20]\n", "✗ [5, 5, 3, 20, 20]\n", "✓ [5, 5, 5, 20, 20]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 20]\n", "✗ [5, 5, 20, 20]\n", "✗ [5, 5, 20, 20]\n", "✗ [5, 5, 20, 20]\n", "✗ [5, 5, 5, 20]\n", "✗ [5, 5, 5, 20]\n", "✗ [0, 5, 5, 20, 20]\n", "✗ [1, 5, 5, 20, 20]\n", "✗ [3, 5, 5, 20, 20]\n", "✗ [4, 5, 5, 20, 20]\n", "✗ [5, 0, 5, 20, 20]\n", "✗ [5, 1, 5, 20, 20]\n", "✗ [5, 3, 5, 20, 20]\n", "✗ [5, 4, 5, 20, 20]\n", "✗ [5, 5, 0, 20, 20]\n", "✗ [5, 5, 1, 20, 20]\n", "✗ [5, 5, 3, 20, 20]\n", "✗ [5, 5, 4, 20, 20]\n", "✗ [5, 5, 5, 0, 20]\n", "✗ [5, 5, 5, 1, 20]\n", "✗ [5, 5, 5, 3, 20]\n", "✓ [5, 5, 5, 7, 20]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 7]\n", "✗ [5, 5, 7, 20]\n", "✗ [5, 5, 7, 20]\n", "✗ [5, 5, 7, 20]\n", "✗ [5, 5, 5, 20]\n", "✗ [5, 5, 5, 7]\n", "✗ [0, 5, 5, 7, 20]\n", "✗ [1, 5, 5, 7, 20]\n", "✗ [3, 5, 5, 7, 20]\n", "✗ [4, 5, 5, 7, 20]\n", "✗ [5, 0, 5, 7, 20]\n", "✗ [5, 1, 5, 7, 20]\n", "✗ [5, 3, 5, 7, 20]\n", "✗ [5, 4, 5, 7, 20]\n", "✗ [5, 5, 0, 7, 20]\n", "✗ [5, 5, 1, 7, 20]\n", "✗ [5, 5, 3, 7, 20]\n", "✗ [5, 5, 4, 7, 20]\n", "✗ [5, 5, 5, 0, 20]\n", "✗ [5, 5, 5, 1, 20]\n", "✗ [5, 5, 5, 3, 20]\n", "✓ [5, 5, 5, 5, 20]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 20]\n", "✗ [5, 5, 5, 20]\n", "✗ [5, 5, 5, 20]\n", "✗ [5, 5, 5, 20]\n", "✗ [5, 5, 5, 5]\n", "✗ [0, 5, 5, 5, 20]\n", "✗ [1, 5, 5, 5, 20]\n", "✗ [3, 5, 5, 5, 20]\n", "✗ [4, 5, 5, 5, 
20]\n", "✗ [5, 0, 5, 5, 20]\n", "✗ [5, 1, 5, 5, 20]\n", "✗ [5, 3, 5, 5, 20]\n", "✗ [5, 4, 5, 5, 20]\n", "✗ [5, 5, 0, 5, 20]\n", "✗ [5, 5, 1, 5, 20]\n", "✗ [5, 5, 3, 5, 20]\n", "✗ [5, 5, 4, 5, 20]\n", "✗ [5, 5, 5, 0, 20]\n", "✗ [5, 5, 5, 1, 20]\n", "✗ [5, 5, 5, 3, 20]\n", "✗ [5, 5, 5, 4, 20]\n", "✗ [5, 5, 5, 5, 0]\n", "✗ [5, 5, 5, 5, 1]\n", "✗ [5, 5, 5, 5, 3]\n", "✓ [5, 5, 5, 5, 7]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 7]\n", "✗ [5, 5, 5, 7]\n", "✗ [5, 5, 5, 7]\n", "✗ [5, 5, 5, 7]\n", "✗ [5, 5, 5, 5]\n", "✗ [0, 5, 5, 5, 7]\n", "✗ [1, 5, 5, 5, 7]\n", "✗ [3, 5, 5, 5, 7]\n", "✗ [4, 5, 5, 5, 7]\n", "✗ [5, 0, 5, 5, 7]\n", "✗ [5, 1, 5, 5, 7]\n", "✗ [5, 3, 5, 5, 7]\n", "✗ [5, 4, 5, 5, 7]\n", "✗ [5, 5, 0, 5, 7]\n", "✗ [5, 5, 1, 5, 7]\n", "✗ [5, 5, 3, 5, 7]\n", "✗ [5, 5, 4, 5, 7]\n", "✗ [5, 5, 5, 0, 7]\n", "✗ [5, 5, 5, 1, 7]\n", "✗ [5, 5, 5, 3, 7]\n", "✗ [5, 5, 5, 4, 7]\n", "✗ [5, 5, 5, 5, 0]\n", "✗ [5, 5, 5, 5, 1]\n", "✗ [5, 5, 5, 5, 3]\n", "✓ [5, 5, 5, 5, 5]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [0, 5, 5, 5, 5]\n", "✗ [1, 5, 5, 5, 5]\n", "✗ [3, 5, 5, 5, 5]\n", "✗ [4, 5, 5, 5, 5]\n", "✗ [5, 0, 5, 5, 5]\n", "✗ [5, 1, 5, 5, 5]\n", "✗ [5, 3, 5, 5, 5]\n", "✗ [5, 4, 5, 5, 5]\n", "✗ [5, 5, 0, 5, 5]\n", "✗ [5, 5, 1, 5, 5]\n", "✗ [5, 5, 3, 5, 5]\n", "✗ [5, 5, 4, 5, 5]\n", "✗ [5, 5, 5, 0, 5]\n", "✗ [5, 5, 5, 1, 5]\n", "✗ [5, 5, 5, 3, 5]\n", "✗ [5, 5, 5, 4, 5]\n", "✗ [5, 5, 5, 5, 0]\n", "✗ [5, 5, 5, 5, 1]\n", "✗ [5, 5, 5, 5, 3]\n", "✗ [5, 5, 5, 5, 4]\n", "\n", "12 shrinks with 236 function calls\n" ] } ], "source": [ "show_trace([20] * 7,\n", " lambda x: len([t for t in x if t >= 5]) >= 5,\n", " partial(greedy_shrink, shrink=shrink4))" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def shrink_shared(ls):\n", " \"\"\"\n", " Look for all sets of shared indices and try to perform a 
simultaneous shrink on\n", " their value, replacing all of them at once.\n", " \n", " In actual Hypothesis we also try replacing only subsets of the values when there\n", " are more than two shared values, but we won't worry about that here.\n", " \"\"\"\n", " shared_indices = {}\n", " for i in range(len(ls)):\n", " shared_indices.setdefault(ls[i], []).append(i)\n", " for sharing in shared_indices.values():\n", " if len(sharing) > 1:\n", " for v in shrink_integer(ls[sharing[0]]):\n", " s = list(ls)\n", " for i in sharing:\n", " s[i] = v\n", " yield s\n", "\n", "\n", "def shrink5(ls):\n", " yield from shrink_to_prefix(ls)\n", " yield from delete_individual_elements(ls)\n", " yield from shrink_shared(ls)\n", " yield from shrink_individual_elements(ls)" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [20, 20, 20, 20, 20, 20, 20]\n", "✗ [20]\n", "✗ [20, 20]\n", "✗ [20, 20, 20, 20]\n", "✓ [20, 20, 20, 20, 20, 20]\n", "✗ [20]\n", "✗ [20, 20]\n", "✗ [20, 20, 20, 20]\n", "✓ [20, 20, 20, 20, 20]\n", "✗ [20]\n", "✗ [20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [20, 20, 20, 20]\n", "✗ [0, 0, 0, 0, 0]\n", "✗ [1, 1, 1, 1, 1]\n", "✗ [3, 3, 3, 3, 3]\n", "✓ [7, 7, 7, 7, 7]\n", "✗ [7]\n", "✗ [7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [0, 0, 0, 0, 0]\n", "✗ [1, 1, 1, 1, 1]\n", "✗ [3, 3, 3, 3, 3]\n", "✓ [5, 5, 5, 5, 5]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [0, 0, 0, 0, 0]\n", "✗ [1, 1, 1, 1, 1]\n", "✗ [3, 3, 3, 3, 3]\n", "✗ [4, 4, 4, 4, 4]\n", "✗ [0, 5, 5, 5, 5]\n", "✗ [1, 5, 5, 5, 5]\n", "✗ [3, 5, 5, 5, 5]\n", "✗ [4, 5, 5, 5, 5]\n", "✗ [5, 0, 5, 5, 5]\n", "✗ [5, 1, 5, 5, 5]\n", "✗ [5, 
3, 5, 5, 5]\n", "✗ [5, 4, 5, 5, 5]\n", "✗ [5, 5, 0, 5, 5]\n", "✗ [5, 5, 1, 5, 5]\n", "✗ [5, 5, 3, 5, 5]\n", "✗ [5, 5, 4, 5, 5]\n", "✗ [5, 5, 5, 0, 5]\n", "✗ [5, 5, 5, 1, 5]\n", "✗ [5, 5, 5, 3, 5]\n", "✗ [5, 5, 5, 4, 5]\n", "✗ [5, 5, 5, 5, 0]\n", "✗ [5, 5, 5, 5, 1]\n", "✗ [5, 5, 5, 5, 3]\n", "✗ [5, 5, 5, 5, 4]\n", "\n", "4 shrinks with 64 function calls\n" ] } ], "source": [ "show_trace([20] * 7,\n", " lambda x: len([t for t in x if t >= 5]) >= 5,\n", " partial(greedy_shrink, shrink=shrink5))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This achieves the desired result. We rapidly progress through all of the intermediate stages. We do still have to perform individual shrinks at the end unfortunately (this is unavoidable), but the size of the elements is much smaller now so it takes less time.\n", "\n", "Unfortunately while this solves the problem in this case it's almost useless, because unless you find yourself in the exact right starting position it never does anything." 
] }, { "cell_type": "code", "execution_count": 18, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [20, 21, 22, 23, 24, 25, 26]\n", "✗ [20]\n", "✗ [20, 21]\n", "✗ [20, 21, 22, 23]\n", "✓ [21, 22, 23, 24, 25, 26]\n", "✗ [21]\n", "✗ [21, 22]\n", "✗ [21, 22, 23, 24]\n", "✓ [22, 23, 24, 25, 26]\n", "✗ [22]\n", "✗ [22, 23]\n", "✗ [22, 23, 24, 25]\n", "✗ [23, 24, 25, 26]\n", "✗ [22, 24, 25, 26]\n", "✗ [22, 23, 25, 26]\n", "✗ [22, 23, 24, 26]\n", "✗ [22, 23, 24, 25]\n", "✗ [0, 23, 24, 25, 26]\n", "✗ [1, 23, 24, 25, 26]\n", "✗ [3, 23, 24, 25, 26]\n", "✓ [7, 23, 24, 25, 26]\n", "✗ [7]\n", "✗ [7, 23]\n", "✗ [7, 23, 24, 25]\n", "✗ [23, 24, 25, 26]\n", "✗ [7, 24, 25, 26]\n", "✗ [7, 23, 25, 26]\n", "✗ [7, 23, 24, 26]\n", "✗ [7, 23, 24, 25]\n", "✗ [0, 23, 24, 25, 26]\n", "✗ [1, 23, 24, 25, 26]\n", "✗ [3, 23, 24, 25, 26]\n", "✓ [5, 23, 24, 25, 26]\n", "✗ [5]\n", "✗ [5, 23]\n", "✗ [5, 23, 24, 25]\n", "✗ [23, 24, 25, 26]\n", "✗ [5, 24, 25, 26]\n", "✗ [5, 23, 25, 26]\n", "✗ [5, 23, 24, 26]\n", "✗ [5, 23, 24, 25]\n", "✗ [0, 23, 24, 25, 26]\n", "✗ [1, 23, 24, 25, 26]\n", "✗ [3, 23, 24, 25, 26]\n", "✗ [4, 23, 24, 25, 26]\n", "✗ [5, 0, 24, 25, 26]\n", "✗ [5, 1, 24, 25, 26]\n", "✗ [5, 3, 24, 25, 26]\n", "✓ [5, 7, 24, 25, 26]\n", "✗ [5]\n", "✗ [5, 7]\n", "✗ [5, 7, 24, 25]\n", "✗ [7, 24, 25, 26]\n", "✗ [5, 24, 25, 26]\n", "✗ [5, 7, 25, 26]\n", "✗ [5, 7, 24, 26]\n", "✗ [5, 7, 24, 25]\n", "✗ [0, 7, 24, 25, 26]\n", "✗ [1, 7, 24, 25, 26]\n", "✗ [3, 7, 24, 25, 26]\n", "✗ [4, 7, 24, 25, 26]\n", "✗ [5, 0, 24, 25, 26]\n", "✗ [5, 1, 24, 25, 26]\n", "✗ [5, 3, 24, 25, 26]\n", "✓ [5, 5, 24, 25, 26]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 24, 25]\n", "✗ [5, 24, 25, 26]\n", "✗ [5, 24, 25, 26]\n", "✗ [5, 5, 25, 26]\n", "✗ [5, 5, 24, 26]\n", "✗ [5, 5, 24, 25]\n", "✗ [0, 0, 24, 25, 26]\n", "✗ [1, 1, 24, 25, 26]\n", "✗ [3, 3, 24, 25, 26]\n", "✗ [4, 4, 24, 25, 26]\n", "✗ [0, 5, 24, 25, 26]\n", "✗ [1, 5, 24, 25, 26]\n", "✗ [3, 5, 24, 25, 26]\n", "✗ [4, 
5, 24, 25, 26]\n", "✗ [5, 0, 24, 25, 26]\n", "✗ [5, 1, 24, 25, 26]\n", "✗ [5, 3, 24, 25, 26]\n", "✗ [5, 4, 24, 25, 26]\n", "✗ [5, 5, 0, 25, 26]\n", "✗ [5, 5, 1, 25, 26]\n", "✗ [5, 5, 3, 25, 26]\n", "✓ [5, 5, 7, 25, 26]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 7, 25]\n", "✗ [5, 7, 25, 26]\n", "✗ [5, 7, 25, 26]\n", "✗ [5, 5, 25, 26]\n", "✗ [5, 5, 7, 26]\n", "✗ [5, 5, 7, 25]\n", "✗ [0, 0, 7, 25, 26]\n", "✗ [1, 1, 7, 25, 26]\n", "✗ [3, 3, 7, 25, 26]\n", "✗ [4, 4, 7, 25, 26]\n", "✗ [0, 5, 7, 25, 26]\n", "✗ [1, 5, 7, 25, 26]\n", "✗ [3, 5, 7, 25, 26]\n", "✗ [4, 5, 7, 25, 26]\n", "✗ [5, 0, 7, 25, 26]\n", "✗ [5, 1, 7, 25, 26]\n", "✗ [5, 3, 7, 25, 26]\n", "✗ [5, 4, 7, 25, 26]\n", "✗ [5, 5, 0, 25, 26]\n", "✗ [5, 5, 1, 25, 26]\n", "✗ [5, 5, 3, 25, 26]\n", "✓ [5, 5, 5, 25, 26]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 25]\n", "✗ [5, 5, 25, 26]\n", "✗ [5, 5, 25, 26]\n", "✗ [5, 5, 25, 26]\n", "✗ [5, 5, 5, 26]\n", "✗ [5, 5, 5, 25]\n", "✗ [0, 0, 0, 25, 26]\n", "✗ [1, 1, 1, 25, 26]\n", "✗ [3, 3, 3, 25, 26]\n", "✗ [4, 4, 4, 25, 26]\n", "✗ [0, 5, 5, 25, 26]\n", "✗ [1, 5, 5, 25, 26]\n", "✗ [3, 5, 5, 25, 26]\n", "✗ [4, 5, 5, 25, 26]\n", "✗ [5, 0, 5, 25, 26]\n", "✗ [5, 1, 5, 25, 26]\n", "✗ [5, 3, 5, 25, 26]\n", "✗ [5, 4, 5, 25, 26]\n", "✗ [5, 5, 0, 25, 26]\n", "✗ [5, 5, 1, 25, 26]\n", "✗ [5, 5, 3, 25, 26]\n", "✗ [5, 5, 4, 25, 26]\n", "✗ [5, 5, 5, 0, 26]\n", "✗ [5, 5, 5, 1, 26]\n", "✗ [5, 5, 5, 3, 26]\n", "✓ [5, 5, 5, 7, 26]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 7]\n", "✗ [5, 5, 7, 26]\n", "✗ [5, 5, 7, 26]\n", "✗ [5, 5, 7, 26]\n", "✗ [5, 5, 5, 26]\n", "✗ [5, 5, 5, 7]\n", "✗ [0, 0, 0, 7, 26]\n", "✗ [1, 1, 1, 7, 26]\n", "✗ [3, 3, 3, 7, 26]\n", "✗ [4, 4, 4, 7, 26]\n", "✗ [0, 5, 5, 7, 26]\n", "✗ [1, 5, 5, 7, 26]\n", "✗ [3, 5, 5, 7, 26]\n", "✗ [4, 5, 5, 7, 26]\n", "✗ [5, 0, 5, 7, 26]\n", "✗ [5, 1, 5, 7, 26]\n", "✗ [5, 3, 5, 7, 26]\n", "✗ [5, 4, 5, 7, 26]\n", "✗ [5, 5, 0, 7, 26]\n", "✗ [5, 5, 1, 7, 26]\n", "✗ [5, 5, 3, 7, 26]\n", "✗ [5, 5, 4, 7, 26]\n", "✗ [5, 5, 5, 0, 26]\n", "✗ [5, 5, 5, 1, 
26]\n", "✗ [5, 5, 5, 3, 26]\n", "✓ [5, 5, 5, 5, 26]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 26]\n", "✗ [5, 5, 5, 26]\n", "✗ [5, 5, 5, 26]\n", "✗ [5, 5, 5, 26]\n", "✗ [5, 5, 5, 5]\n", "✗ [0, 0, 0, 0, 26]\n", "✗ [1, 1, 1, 1, 26]\n", "✗ [3, 3, 3, 3, 26]\n", "✗ [4, 4, 4, 4, 26]\n", "✗ [0, 5, 5, 5, 26]\n", "✗ [1, 5, 5, 5, 26]\n", "✗ [3, 5, 5, 5, 26]\n", "✗ [4, 5, 5, 5, 26]\n", "✗ [5, 0, 5, 5, 26]\n", "✗ [5, 1, 5, 5, 26]\n", "✗ [5, 3, 5, 5, 26]\n", "✗ [5, 4, 5, 5, 26]\n", "✗ [5, 5, 0, 5, 26]\n", "✗ [5, 5, 1, 5, 26]\n", "✗ [5, 5, 3, 5, 26]\n", "✗ [5, 5, 4, 5, 26]\n", "✗ [5, 5, 5, 0, 26]\n", "✗ [5, 5, 5, 1, 26]\n", "✗ [5, 5, 5, 3, 26]\n", "✗ [5, 5, 5, 4, 26]\n", "✗ [5, 5, 5, 5, 0]\n", "✗ [5, 5, 5, 5, 1]\n", "✗ [5, 5, 5, 5, 3]\n", "✓ [5, 5, 5, 5, 7]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 7]\n", "✗ [5, 5, 5, 7]\n", "✗ [5, 5, 5, 7]\n", "✗ [5, 5, 5, 7]\n", "✗ [5, 5, 5, 5]\n", "✗ [0, 0, 0, 0, 7]\n", "✗ [1, 1, 1, 1, 7]\n", "✗ [3, 3, 3, 3, 7]\n", "✗ [4, 4, 4, 4, 7]\n", "✗ [0, 5, 5, 5, 7]\n", "✗ [1, 5, 5, 5, 7]\n", "✗ [3, 5, 5, 5, 7]\n", "✗ [4, 5, 5, 5, 7]\n", "✗ [5, 0, 5, 5, 7]\n", "✗ [5, 1, 5, 5, 7]\n", "✗ [5, 3, 5, 5, 7]\n", "✗ [5, 4, 5, 5, 7]\n", "✗ [5, 5, 0, 5, 7]\n", "✗ [5, 5, 1, 5, 7]\n", "✗ [5, 5, 3, 5, 7]\n", "✗ [5, 5, 4, 5, 7]\n", "✗ [5, 5, 5, 0, 7]\n", "✗ [5, 5, 5, 1, 7]\n", "✗ [5, 5, 5, 3, 7]\n", "✗ [5, 5, 5, 4, 7]\n", "✗ [5, 5, 5, 5, 0]\n", "✗ [5, 5, 5, 5, 1]\n", "✗ [5, 5, 5, 5, 3]\n", "✓ [5, 5, 5, 5, 5]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [0, 0, 0, 0, 0]\n", "✗ [1, 1, 1, 1, 1]\n", "✗ [3, 3, 3, 3, 3]\n", "✗ [4, 4, 4, 4, 4]\n", "✗ [0, 5, 5, 5, 5]\n", "✗ [1, 5, 5, 5, 5]\n", "✗ [3, 5, 5, 5, 5]\n", "✗ [4, 5, 5, 5, 5]\n", "✗ [5, 0, 5, 5, 5]\n", "✗ [5, 1, 5, 5, 5]\n", "✗ [5, 3, 5, 5, 5]\n", "✗ [5, 4, 5, 5, 5]\n", "✗ [5, 5, 0, 5, 5]\n", "✗ [5, 5, 1, 5, 5]\n", "✗ [5, 5, 3, 5, 5]\n", "✗ [5, 5, 4, 5, 5]\n", "✗ [5, 5, 5, 
0, 5]\n", "✗ [5, 5, 5, 1, 5]\n", "✗ [5, 5, 5, 3, 5]\n", "✗ [5, 5, 5, 4, 5]\n", "✗ [5, 5, 5, 5, 0]\n", "✗ [5, 5, 5, 5, 1]\n", "✗ [5, 5, 5, 5, 3]\n", "✗ [5, 5, 5, 5, 4]\n", "\n", "12 shrinks with 264 function calls\n" ] } ], "source": [ "show_trace([20 + i for i in range(7)],\n", " lambda x: len([t for t in x if t >= 5]) >= 5,\n", " partial(greedy_shrink, shrink=shrink5))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So what we're going to try to do is to try a simplification first which *creates* that exact right starting condition. Further it's one that will be potentially very useful even if we don't actually have the situation where we have shared shrinks.\n", "\n", "What we're going to do is we're going to use values from the list to act as evidence for how complex things need to be. Starting from the smallest, we'll try capping the array at each individual value and see what happens.\n", "\n", "As well as being potentially a very rapid shrink, this creates lists with lots of duplicates, which enables the simultaneous shrinking to shine." 
] }, { "cell_type": "code", "execution_count": 19, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def replace_with_simpler(ls):\n", " if not ls:\n", " return\n", " values = set(ls)\n", " values.remove(max(ls))\n", " values = sorted(values)\n", " for v in values:\n", " yield [min(v, l) for l in ls]\n", "\n", "\n", "def shrink6(ls):\n", " yield from shrink_to_prefix(ls)\n", " yield from delete_individual_elements(ls)\n", " yield from replace_with_simpler(ls)\n", " yield from shrink_shared(ls)\n", " yield from shrink_individual_elements(ls)" ] }, { "cell_type": "code", "execution_count": 20, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [20, 21, 22, 23, 24, 25, 26]\n", "✗ [20]\n", "✗ [20, 21]\n", "✗ [20, 21, 22, 23]\n", "✓ [21, 22, 23, 24, 25, 26]\n", "✗ [21]\n", "✗ [21, 22]\n", "✗ [21, 22, 23, 24]\n", "✓ [22, 23, 24, 25, 26]\n", "✗ [22]\n", "✗ [22, 23]\n", "✗ [22, 23, 24, 25]\n", "✗ [23, 24, 25, 26]\n", "✗ [22, 24, 25, 26]\n", "✗ [22, 23, 25, 26]\n", "✗ [22, 23, 24, 26]\n", "✗ [22, 23, 24, 25]\n", "✓ [22, 22, 22, 22, 22]\n", "✗ [22]\n", "✗ [22, 22]\n", "✗ [22, 22, 22, 22]\n", "✗ [22, 22, 22, 22]\n", "✗ [22, 22, 22, 22]\n", "✗ [22, 22, 22, 22]\n", "✗ [22, 22, 22, 22]\n", "✗ [22, 22, 22, 22]\n", "✗ [0, 0, 0, 0, 0]\n", "✗ [1, 1, 1, 1, 1]\n", "✗ [3, 3, 3, 3, 3]\n", "✓ [7, 7, 7, 7, 7]\n", "✗ [7]\n", "✗ [7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [7, 7, 7, 7]\n", "✗ [0, 0, 0, 0, 0]\n", "✗ [1, 1, 1, 1, 1]\n", "✗ [3, 3, 3, 3, 3]\n", "✓ [5, 5, 5, 5, 5]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [5, 5, 5, 5]\n", "✗ [0, 0, 0, 0, 0]\n", "✗ [1, 1, 1, 1, 1]\n", "✗ [3, 3, 3, 3, 3]\n", "✗ [4, 4, 4, 4, 4]\n", "✗ [0, 5, 5, 5, 5]\n", "✗ [1, 5, 5, 5, 5]\n", "✗ [3, 5, 5, 5, 5]\n", "✗ [4, 5, 5, 5, 5]\n", "✗ [5, 0, 5, 5, 5]\n", "✗ [5, 1, 5, 5, 5]\n", "✗ [5, 
3, 5, 5, 5]\n", "✗ [5, 4, 5, 5, 5]\n", "✗ [5, 5, 0, 5, 5]\n", "✗ [5, 5, 1, 5, 5]\n", "✗ [5, 5, 3, 5, 5]\n", "✗ [5, 5, 4, 5, 5]\n", "✗ [5, 5, 5, 0, 5]\n", "✗ [5, 5, 5, 1, 5]\n", "✗ [5, 5, 5, 3, 5]\n", "✗ [5, 5, 5, 4, 5]\n", "✗ [5, 5, 5, 5, 0]\n", "✗ [5, 5, 5, 5, 1]\n", "✗ [5, 5, 5, 5, 3]\n", "✗ [5, 5, 5, 5, 4]\n", "\n", "5 shrinks with 73 function calls\n" ] } ], "source": [ "show_trace([20 + i for i in range(7)],\n", " lambda x: len([t for t in x if t >= 5]) >= 5,\n", " partial(greedy_shrink, shrink=shrink6))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now we're going to start looking at some numbers.\n", "\n", "What we'll do is we'll generate 1000 random lists satisfying some predicate, and then simplify them down to the smallest possible examples satisfying those predicates. This lets us verify that these aren't just cherry-picked examples and our methods help in the general case. We fix the set of examples per predicate so that we're comparing like for like.\n", "\n", "A more proper statistical treatment would probably be a good idea." 
] }, { "cell_type": "code", "execution_count": 21, "metadata": { "collapsed": true }, "outputs": [], "source": [ "from collections import OrderedDict\n", "\n", "conditions = OrderedDict([\n", " (\"length >= 2\", lambda xs: len(xs) >= 2),\n", " (\"sum >= 500\", lambda xs: sum(xs) >= 500),\n", " (\"sum >= 3\", lambda xs: sum(xs) >= 3),\n", " (\"At least 10 by 5\", lambda xs: len(\n", " [t for t in xs if t >= 5]) >= 10),\n", "])" ] }, { "cell_type": "code", "execution_count": 22, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "[17861213645196285187,\n", " 15609796832515195084,\n", " 8808697621832673046,\n", " 1013319847337885109,\n", " 1252281976438780211,\n", " 15526909770962854196,\n", " 2065337703776048239,\n", " 11654092230944134701,\n", " 5554896851708700201,\n", " 17485190250805381572,\n", " 7700396730246958474,\n", " 402840882133605445,\n", " 5303116940477413125,\n", " 7459257850255946545,\n", " 10349184495871650178,\n", " 4361155591615075311,\n", " 15194020468024244632,\n", " 14428821588688846242,\n", " 5754975712549869618,\n", " 13740966788951413307,\n", " 15209704957418077856,\n", " 12562588328524673262,\n", " 8415556016795311987,\n", " 3993098291779210741,\n", " 16874756914619597640,\n", " 7932421182532982309,\n", " 1080869529149674704,\n", " 13878842261614060122,\n", " 229976195287031921,\n", " 8378461140013520338,\n", " 6189522326946191255,\n", " 16684625600934047114,\n", " 12533448641134015292,\n", " 10459192142175991903,\n", " 15688511015570391481,\n", " 3091340728247101611,\n", " 4034760776171697910,\n", " 6258572097778886531,\n", " 13555449085571665140,\n", " 6727488149749641424,\n", " 7125107819562430884,\n", " 1557872425804423698,\n", " 4810250441100696888,\n", " 10500486959813930693,\n", " 841300069403644975,\n", " 9278626999406014662,\n", " 17219731431761688449,\n", " 15650446646901259126,\n", " 8683172055034528265,\n", " 5138373693056086816,\n", " 4055877702343936882,\n", " 5696765901584750542,\n", " 
7133363948804979946,\n", " 988518370429658551,\n", " 16302597472193523184,\n", " 579078764159525857,\n", " 10678347012503400890,\n", " 8433836779160269996,\n", " 13884258181758870664,\n", " 13594877609651310055]" ] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import random\n", "\n", "N_EXAMPLES = 1000\n", "\n", "datasets = {}\n", "\n", "def gen_list(rnd):\n", " return [\n", " random.getrandbits(64)\n", " for _ in range(random.randint(0, 100))\n", " ]\n", "\n", "def dataset_for(condition):\n", " if condition in datasets:\n", " return datasets[condition]\n", " constraint = conditions[condition]\n", " dataset = []\n", " rnd = random.Random(condition)\n", " while len(dataset) < N_EXAMPLES:\n", " ls = gen_list(rnd)\n", " if constraint(ls):\n", " dataset.append(ls)\n", " datasets[condition] = dataset\n", " return dataset\n", "\n", "dataset_for(\"sum >= 3\")[1]" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/plain": [ "13" ] }, "execution_count": 23, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# In order to avoid run-away cases where things will take basically forever\n", "# we cap at 5000 as \"you've taken too long. Stop it\". 
Because we're only ever\n", "# showing the worst case scenario we'll just display this as > 5000 if we ever\n", "# hit it and it won't distort statistics.\n", "MAX_COUNT = 5000\n", "\n", "class MaximumCountExceeded(Exception):\n", " pass\n", "\n", "def call_counts(condition, simplifier):\n", " constraint = conditions[condition]\n", " dataset = dataset_for(condition)\n", " counts = []\n", "\n", " for ex in dataset:\n", " counter = [0]\n", " \n", " def run_and_count(ls):\n", " counter[0] += 1\n", " if counter[0] > MAX_COUNT:\n", " raise MaximumCountExceeded()\n", " return constraint(ls)\n", " \n", " try:\n", " simplifier(ex, run_and_count)\n", " counts.extend(counter)\n", " except MaximumCountExceeded:\n", " counts.append(MAX_COUNT + 1)\n", " break\n", " return counts\n", " \n", "def worst_case(condition, simplifier):\n", " return max(call_counts(condition, simplifier))\n", "\n", "worst_case(\n", " \"length >= 2\",\n", " partial(greedy_shrink, shrink=shrink6))" ] }, { "cell_type": "code", "execution_count": 24, "metadata": { "collapsed": false }, "outputs": [], "source": [ "from IPython.display import HTML\n", "\n", "def compare_simplifiers(named_simplifiers):\n", " \"\"\"\n", " Given a list of (name, simplifier) pairs, output a table comparing\n", " the worst case performance of each on our current set of examples.\n", " \"\"\"\n", " html_fragments = []\n", " html_fragments.append(\"\\n\\n\")\n", " header = [\"Condition\"]\n", " header.extend(name for name, _ in named_simplifiers)\n", " for h in header:\n", " html_fragments.append(\"\" % (h,))\n", " html_fragments.append(\"\\n\\n\")\n", " \n", " for name in conditions:\n", " bits = [name.replace(\">\", \">\")] \n", " for _, simplifier in named_simplifiers:\n", " value = worst_case(name, simplifier)\n", " if value <= MAX_COUNT:\n", " bits.append(str(value))\n", " else:\n", " bits.append(\" > %d\" % (MAX_COUNT,))\n", " html_fragments.append(\" \")\n", " html_fragments.append(' '.join(\n", " \"\" % (b,) for b in 
bits))\n", " html_fragments.append(\"\")\n", " html_fragments.append(\"\\n
%s
%s
\")\n", " return HTML('\\n'.join(html_fragments))" ] }, { "cell_type": "code", "execution_count": 25, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", "\n", "
Condition23456
length >= 2 106 105 13 13 13
sum >= 500 1102 178 80 80 80
sum >= 3 108 107 9 9 9
At least 10 by 5 535 690 809 877 144
" ], "text/plain": [ "" ] }, "execution_count": 25, "metadata": {}, "output_type": "execute_result" } ], "source": [ "compare_simplifiers([\n", " (f.__name__[-1], partial(greedy_shrink, shrink=f))\n", " for f in [shrink2, shrink3, shrink4, shrink5, shrink6]\n", "])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So you can see from the above table, the iterations 2 through 5 were a little ambiguous ion that they helped a lot in the cases they were designed to help with but hurt in other cases. 6 however is clearly the best of the lot, being no worse than any of the others on any of the cases and often significantly better.\n", "\n", "Rather than continuing to refine our shrink further, we instead look to improvements to how we use shrinking. We'll start by noting a simple optimization: If you look at our traces above, we often checked the same example twice. We're only interested in deterministic conditions, so this isn't useful to do. So we'll start by simply pruning out all duplicates. This should have exactly the same set and order of successful shrinks but will avoid a bunch of redundant work." ] }, { "cell_type": "code", "execution_count": 26, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def greedy_shrink_with_dedupe(ls, constraint, shrink):\n", " seen = set()\n", " while True:\n", " for s in shrink(ls):\n", " key = tuple(s)\n", " if key in seen:\n", " continue\n", " seen.add(key)\n", " if constraint(s):\n", " ls = s\n", " break\n", " else:\n", " return ls" ] }, { "cell_type": "code", "execution_count": 27, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", "\n", "
ConditionNormalDeduped
length >= 2 13 6
sum >= 500 80 35
sum >= 3 9 6
At least 10 by 5 144 107
" ], "text/plain": [ "" ] }, "execution_count": 27, "metadata": {}, "output_type": "execute_result" } ], "source": [ "compare_simplifiers([\n", " (\"Normal\", partial(greedy_shrink, shrink=shrink6)),\n", " (\"Deduped\", partial(greedy_shrink_with_dedupe,\n", " shrink=shrink6)),\n", "\n", "])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "As expected, this is a significant improvement in some cases. It is logically impossible that it could ever make things worse, but it's nice that it makes it better.\n", "\n", "So far we've only looked at things where the interaction between elements was fairly light - the sum cases the values of other elements mattered a bit, but shrinking an integer could never enable other shrinks. Lets look at one where this is not the case: Where our condition is that we have at least 10 distinct elements." ] }, { "cell_type": "code", "execution_count": 28, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [100, 101, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [100]\n", "✗ [100, 101]\n", "✗ [100, 101, 102, 103]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107]\n", "✗ [101, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [100, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [100, 101, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [100, 101, 102, 104, 105, 106, 107, 108, 109]\n", "✗ [100, 101, 102, 103, 105, 106, 107, 108, 109]\n", "✗ [100, 101, 102, 103, 104, 106, 107, 108, 109]\n", "✗ [100, 101, 102, 103, 104, 105, 107, 108, 109]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 108, 109]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107, 109]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107, 108]\n", "✗ [100, 100, 100, 100, 100, 100, 100, 100, 100, 100]\n", "✗ [100, 101, 101, 101, 101, 101, 101, 101, 101, 101]\n", "✗ [100, 101, 102, 102, 102, 102, 102, 102, 102, 102]\n", "✗ [100, 101, 102, 103, 103, 103, 103, 103, 103, 103]\n", "✗ [100, 101, 102, 103, 104, 104, 104, 104, 104, 104]\n", "✗ 
[100, 101, 102, 103, 104, 105, 105, 105, 105, 105]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 106, 106, 106]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107, 107, 107]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107, 108, 108]\n", "✓ [0, 101, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 101]\n", "✗ [0, 101, 102, 103]\n", "✗ [0, 101, 102, 103, 104, 105, 106, 107]\n", "✗ [101, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 101, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 101, 102, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 101, 102, 103, 105, 106, 107, 108, 109]\n", "✗ [0, 101, 102, 103, 104, 106, 107, 108, 109]\n", "✗ [0, 101, 102, 103, 104, 105, 107, 108, 109]\n", "✗ [0, 101, 102, 103, 104, 105, 106, 108, 109]\n", "✗ [0, 101, 102, 103, 104, 105, 106, 107, 109]\n", "✗ [0, 101, 102, 103, 104, 105, 106, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 101, 101, 101, 101, 101, 101, 101, 101, 101]\n", "✗ [0, 101, 102, 102, 102, 102, 102, 102, 102, 102]\n", "✗ [0, 101, 102, 103, 103, 103, 103, 103, 103, 103]\n", "✗ [0, 101, 102, 103, 104, 104, 104, 104, 104, 104]\n", "✗ [0, 101, 102, 103, 104, 105, 105, 105, 105, 105]\n", "✗ [0, 101, 102, 103, 104, 105, 106, 106, 106, 106]\n", "✗ [0, 101, 102, 103, 104, 105, 106, 107, 107, 107]\n", "✗ [0, 101, 102, 103, 104, 105, 106, 107, 108, 108]\n", "✗ [0, 0, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 102, 103]\n", "✗ [0, 1, 102, 103, 104, 105, 106, 107]\n", "✗ [1, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 102, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 102, 103, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 102, 103, 104, 106, 107, 108, 109]\n", "✗ [0, 1, 102, 103, 104, 105, 107, 108, 109]\n", "✗ [0, 1, 102, 103, 104, 105, 106, 108, 109]\n", "✗ [0, 1, 102, 103, 
104, 105, 106, 107, 109]\n", "✗ [0, 1, 102, 103, 104, 105, 106, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 102, 102, 102, 102, 102, 102, 102, 102]\n", "✗ [0, 1, 102, 103, 103, 103, 103, 103, 103, 103]\n", "✗ [0, 1, 102, 103, 104, 104, 104, 104, 104, 104]\n", "✗ [0, 1, 102, 103, 104, 105, 105, 105, 105, 105]\n", "✗ [0, 1, 102, 103, 104, 105, 106, 106, 106, 106]\n", "✗ [0, 1, 102, 103, 104, 105, 106, 107, 107, 107]\n", "✗ [0, 1, 102, 103, 104, 105, 106, 107, 108, 108]\n", "✗ [0, 0, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 103, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 3, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 3, 103]\n", "✗ [0, 1, 3, 103, 104, 105, 106, 107]\n", "✗ [1, 3, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 3, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 3, 103, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 3, 103, 104, 106, 107, 108, 109]\n", "✗ [0, 1, 3, 103, 104, 105, 107, 108, 109]\n", "✗ [0, 1, 3, 103, 104, 105, 106, 108, 109]\n", "✗ [0, 1, 3, 103, 104, 105, 106, 107, 109]\n", "✗ [0, 1, 3, 103, 104, 105, 106, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 3, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 3, 103, 103, 103, 103, 103, 103, 103]\n", "✗ [0, 1, 3, 103, 104, 104, 104, 104, 104, 104]\n", "✗ [0, 1, 3, 103, 104, 105, 105, 105, 105, 105]\n", "✗ [0, 1, 3, 103, 104, 105, 106, 106, 106, 106]\n", "✗ [0, 1, 3, 103, 104, 105, 106, 107, 107, 107]\n", "✗ [0, 1, 3, 103, 104, 105, 106, 107, 108, 108]\n", "✗ [0, 0, 3, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 103, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 103]\n", "✗ [0, 1, 
2, 103, 104, 105, 106, 107]\n", "✗ [1, 2, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 2, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 103, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 103, 104, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 103, 104, 105, 107, 108, 109]\n", "✗ [0, 1, 2, 103, 104, 105, 106, 108, 109]\n", "✗ [0, 1, 2, 103, 104, 105, 106, 107, 109]\n", "✗ [0, 1, 2, 103, 104, 105, 106, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 103, 103, 103, 103, 103, 103, 103]\n", "✗ [0, 1, 2, 103, 104, 104, 104, 104, 104, 104]\n", "✗ [0, 1, 2, 103, 104, 105, 105, 105, 105, 105]\n", "✗ [0, 1, 2, 103, 104, 105, 106, 106, 106, 106]\n", "✗ [0, 1, 2, 103, 104, 105, 106, 107, 107, 107]\n", "✗ [0, 1, 2, 103, 104, 105, 106, 107, 108, 108]\n", "✗ [0, 0, 2, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 104, 105, 106, 107]\n", "✗ [1, 2, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 2, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 104, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 104, 105, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 104, 105, 106, 108, 109]\n", "✗ [0, 1, 2, 3, 104, 105, 106, 107, 109]\n", "✗ [0, 1, 2, 3, 104, 105, 106, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 104, 104, 104, 104, 104, 104]\n", "✗ [0, 1, 2, 
3, 104, 105, 105, 105, 105, 105]\n", "✗ [0, 1, 2, 3, 104, 105, 106, 106, 106, 106]\n", "✗ [0, 1, 2, 3, 104, 105, 106, 107, 107, 107]\n", "✗ [0, 1, 2, 3, 104, 105, 106, 107, 108, 108]\n", "✗ [0, 0, 2, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 7, 105, 106, 107]\n", "✗ [1, 2, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 2, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 7, 105, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 7, 105, 106, 108, 109]\n", "✗ [0, 1, 2, 3, 7, 105, 106, 107, 109]\n", "✗ [0, 1, 2, 3, 7, 105, 106, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 7, 7, 7, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 7, 105, 105, 105, 105, 105]\n", "✗ [0, 1, 2, 3, 7, 105, 106, 106, 106, 106]\n", "✗ [0, 1, 2, 3, 7, 105, 106, 107, 107, 107]\n", "✗ [0, 1, 2, 3, 7, 105, 106, 107, 108, 108]\n", "✗ [0, 0, 2, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 105, 106, 
107, 108, 109]\n", "✓ [0, 1, 2, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 5, 105, 106, 107]\n", "✗ [1, 2, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 2, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 5, 105, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 5, 105, 106, 108, 109]\n", "✗ [0, 1, 2, 3, 5, 105, 106, 107, 109]\n", "✗ [0, 1, 2, 3, 5, 105, 106, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 5, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 5, 105, 105, 105, 105, 105]\n", "✗ [0, 1, 2, 3, 5, 105, 106, 106, 106, 106]\n", "✗ [0, 1, 2, 3, 5, 105, 106, 107, 107, 107]\n", "✗ [0, 1, 2, 3, 5, 105, 106, 107, 108, 108]\n", "✗ [0, 0, 2, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 105, 106, 107]\n", "✗ [1, 2, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 2, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 105, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 105, 106, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 105, 106, 107, 109]\n", "✗ [0, 1, 2, 3, 4, 105, 106, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 
0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 105, 105, 105, 105, 105]\n", "✗ [0, 1, 2, 3, 4, 105, 106, 106, 106, 106]\n", "✗ [0, 1, 2, 3, 4, 105, 106, 107, 107, 107]\n", "✗ [0, 1, 2, 3, 4, 105, 106, 107, 108, 108]\n", "✗ [0, 0, 2, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 7, 106, 107]\n", "✗ [1, 2, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 2, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 7, 106, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 7, 106, 107, 109]\n", "✗ [0, 1, 2, 3, 4, 7, 106, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 7, 7, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 4, 7, 106, 106, 106, 106]\n", "✗ [0, 1, 2, 3, 4, 7, 106, 107, 107, 107]\n", "✗ [0, 1, 2, 3, 4, 7, 106, 107, 108, 108]\n", "✗ [0, 0, 2, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 
7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 106, 107]\n", "✗ [1, 2, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 2, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 106, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 106, 107, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 106, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 4, 5, 106, 106, 106, 106]\n", "✗ [0, 1, 2, 3, 4, 5, 106, 107, 107, 107]\n", "✗ [0, 1, 2, 3, 4, 5, 106, 107, 108, 108]\n", "✗ [0, 0, 2, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 107, 108, 109]\n", "✗ 
[0, 1, 2, 3, 4, 5, 3, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 107]\n", "✗ [1, 2, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 2, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 107, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 107, 107, 107]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 107, 108, 108]\n", "✗ [0, 0, 2, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 107]\n", "✗ [1, 2, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 2, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 5, 6, 107, 108, 109]\n", 
"✗ [0, 1, 2, 3, 4, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 107, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 107, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 6, 6]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 107, 107, 107]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 107, 108, 108]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7]\n", "✗ [1, 2, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 2, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 108]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 
1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 6, 6]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 108, 108]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7]\n", "✗ [1, 2, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 2, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 15]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 
3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 6, 6]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 15, 15]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7]\n", "✗ [1, 2, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 2, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 11]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 
2, 3, 4, 5, 6, 6, 6, 6]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 11, 11]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7]\n", "✗ [1, 2, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 2, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 9]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 6, 6]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 9, 9]\n", "✗ 
[0, 0, 2, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7]\n", "✗ [1, 2, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 2, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 6, 6]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 8]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 0, 
4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 6, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 0]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 1]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 7]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7]\n", "✗ [1, 2, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 2, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 6, 6]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 8]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 
1, 1, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 6, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 0]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 1]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 7]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 11]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7]\n", "✗ [1, 2, 3, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 2, 3, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 3, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 6, 6]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 8]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 0, 3, 4, 5, 
6, 7, 8, 11]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 6, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 0]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 1]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 7]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0]\n", "✗ [0, 1]\n", "✗ [0, 1, 2, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7]\n", "✗ [1, 2, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 2, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8]\n", "✗ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", "✗ [0, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n", "✗ [0, 1, 2, 2, 2, 2, 2, 2, 2, 2]\n", "✗ [0, 1, 2, 3, 3, 3, 3, 3, 3, 3]\n", "✗ [0, 1, 2, 3, 4, 4, 4, 4, 4, 4]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 5, 5, 5]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 6, 6]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 7]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 8]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 
0, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 6, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 0]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 1]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 7]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 8]\n", "\n", "20 shrinks with 848 function calls\n" ] } ], "source": [ "show_trace([100 + i for i in range(10)],\n", " lambda x: len(set(x)) >= 10,\n", " partial(greedy_shrink, shrink=shrink6))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This does not do very well at all.\n", "\n", "The reason it doesn't is that we keep trying useless shrinks. e.g. 
none of the shrinks done by shrink\\_to\\_prefix, replace\\_with\\_simpler or shrink\\_shared will ever do anything useful here.\n", "\n", "So lets switch to an approach where we try shrink types until they stop working and then we move on to the next type:" ] }, { "cell_type": "code", "execution_count": 29, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def multicourse_shrink1(ls, constraint):\n", " seen = set()\n", " for shrink in [\n", " shrink_to_prefix,\n", " replace_with_simpler,\n", " shrink_shared,\n", " shrink_individual_elements,\n", " ]:\n", " while True:\n", " for s in shrink(ls):\n", " key = tuple(s)\n", " if key in seen:\n", " continue\n", " seen.add(key)\n", " if constraint(s):\n", " ls = s\n", " break\n", " else:\n", " break\n", " return ls" ] }, { "cell_type": "code", "execution_count": 30, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [100, 101, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [100]\n", "✗ [100, 101]\n", "✗ [100, 101, 102, 103]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107]\n", "✗ [100, 100, 100, 100, 100, 100, 100, 100, 100, 100]\n", "✗ [100, 101, 101, 101, 101, 101, 101, 101, 101, 101]\n", "✗ [100, 101, 102, 102, 102, 102, 102, 102, 102, 102]\n", "✗ [100, 101, 102, 103, 103, 103, 103, 103, 103, 103]\n", "✗ [100, 101, 102, 103, 104, 104, 104, 104, 104, 104]\n", "✗ [100, 101, 102, 103, 104, 105, 105, 105, 105, 105]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 106, 106, 106]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107, 107, 107]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107, 108, 108]\n", "✓ [0, 101, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 0, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 103, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 3, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 0, 3, 103, 104, 105, 106, 107, 108, 109]\n", 
"✓ [0, 1, 2, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 0, 2, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 0, 2, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 0, 2, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 7, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 7, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 0, 2, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 5, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 5, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 0, 2, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 0, 2, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 7, 
106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 7, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 7, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 0, 2, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 0, 2, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 107, 108, 109]\n", "✗ [0, 1, 
2, 3, 4, 1, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 15, 109]\n", "✗ [0, 1, 2, 
3, 4, 5, 6, 0, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 15, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 15, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 11, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 11, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 
5, 9, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 9, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 6, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 0]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 1]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 7]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 8, 15]\n", "✗ [0, 1, 2, 
3, 4, 5, 6, 5, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 8, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 6, 15]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 15]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 7, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 8, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 6, 11]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 11]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 0, 2, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 0, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 1, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 0, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 1, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 2, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 0, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 1, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 3, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 0, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 1, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 3, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 4, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 7, 8, 9]\n", 
"✗ [0, 1, 2, 3, 4, 5, 5, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 6, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 8]\n", "\n", "20 shrinks with 318 function calls\n" ] } ], "source": [ "show_trace([100 + i for i in range(10)],\n", " lambda x: len(set(x)) >= 10,\n", " multicourse_shrink1)" ] }, { "cell_type": "code", "execution_count": 31, "metadata": { "collapsed": false }, "outputs": [], "source": [ "conditions[\"10 distinct elements\"] = lambda xs: len(set(xs)) >= 10" ] }, { "cell_type": "code", "execution_count": 32, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", "\n", "
ConditionSingle passMulti pass
length >= 2 6 4
sum >= 500 35 34
sum >= 3 6 5
At least 10 by 5 107 58
10 distinct elements 623 320
" ], "text/plain": [ "" ] }, "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ "compare_simplifiers([\n", " (\"Single pass\", partial(greedy_shrink_with_dedupe,\n", " shrink=shrink6)),\n", " (\"Multi pass\", multicourse_shrink1)\n", "])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So that helped, but not as much as we'd have liked. It's saved us about half the calls, when really we wanted to save 90% of the calls.\n", "\n", "We're on the right track though. The problem is not that our solution isn't good, it's that it didn't go far enough: We're *still* making an awful lot of useless calls. The problem is that each time we shrink the element at index i we try shrinking the elements at indexes 0 through i - 1, and this will never work. So what we want to do is to break shrinking elements into a separate shrinker for each index:" ] }, { "cell_type": "code", "execution_count": 33, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def simplify_index(i):\n", " def accept(ls):\n", " if i >= len(ls):\n", " return\n", " for v in shrink_integer(ls[i]):\n", " s = list(ls)\n", " s[i] = v\n", " yield s\n", " return accept\n", "\n", "def shrinkers_for(ls):\n", " yield shrink_to_prefix\n", " yield delete_individual_elements\n", " yield replace_with_simpler\n", " yield shrink_shared\n", " for i in range(len(ls)):\n", " yield simplify_index(i)\n", "\n", "def multicourse_shrink2(ls, constraint):\n", " seen = set()\n", " for shrink in shrinkers_for(ls):\n", " while True:\n", " for s in shrink(ls):\n", " key = tuple(s)\n", " if key in seen:\n", " continue\n", " seen.add(key)\n", " if constraint(s):\n", " ls = s\n", " break\n", " else:\n", " break\n", " return ls" ] }, { "cell_type": "code", "execution_count": 34, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [100, 101, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [100]\n", "✗ [100, 101]\n", "✗ [100, 101, 
102, 103]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107]\n", "✗ [101, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [100, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [100, 101, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [100, 101, 102, 104, 105, 106, 107, 108, 109]\n", "✗ [100, 101, 102, 103, 105, 106, 107, 108, 109]\n", "✗ [100, 101, 102, 103, 104, 106, 107, 108, 109]\n", "✗ [100, 101, 102, 103, 104, 105, 107, 108, 109]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 108, 109]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107, 109]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107, 108]\n", "✗ [100, 100, 100, 100, 100, 100, 100, 100, 100, 100]\n", "✗ [100, 101, 101, 101, 101, 101, 101, 101, 101, 101]\n", "✗ [100, 101, 102, 102, 102, 102, 102, 102, 102, 102]\n", "✗ [100, 101, 102, 103, 103, 103, 103, 103, 103, 103]\n", "✗ [100, 101, 102, 103, 104, 104, 104, 104, 104, 104]\n", "✗ [100, 101, 102, 103, 104, 105, 105, 105, 105, 105]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 106, 106, 106]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107, 107, 107]\n", "✗ [100, 101, 102, 103, 104, 105, 106, 107, 108, 108]\n", "✓ [0, 101, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 0, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 102, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 0, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 1, 103, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 3, 103, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 103, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 0, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 1, 104, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 2, 104, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 0, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 1, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 3, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 7, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 5, 105, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 105, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 0, 
106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 1, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 3, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 7, 106, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 4, 106, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 0, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 1, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 3, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 7, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 5, 107, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 107, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 0, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 1, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 3, 108, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 5, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 6, 108, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 0, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 1, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 3, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 7, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 15, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 11, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 9, 109]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 6, 109]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 0]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 1]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 3]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 7]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 15]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 11]\n", "✓ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n", "✗ [0, 1, 2, 3, 4, 5, 6, 7, 8, 8]\n", "\n", "20 shrinks with 75 function calls\n" ] } ], "source": [ "show_trace([100 + i for i in range(10)],\n", " lambda x: len(set(x)) >= 10,\n", " multicourse_shrink2)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This worked great! It saved us a huge number of function calls.\n", "\n", "Unfortunately it's wrong. Actually the previous one was wrong too, but this one is more obviously wrong. 
The problem is that shrinking later elements can unlock more shrinks for earlier elements and we'll never be able to benefit from that here:" ] }, { "cell_type": "code", "execution_count": 35, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [101, 100]\n", "✗ [101]\n", "✗ [100]\n", "✗ [100, 100]\n", "✗ [0, 100]\n", "✗ [1, 100]\n", "✗ [3, 100]\n", "✗ [7, 100]\n", "✗ [15, 100]\n", "✗ [31, 100]\n", "✗ [63, 100]\n", "✗ [82, 100]\n", "✗ [91, 100]\n", "✗ [96, 100]\n", "✗ [98, 100]\n", "✗ [99, 100]\n", "✓ [101, 0]\n", "\n", "1 shrinks with 16 function calls\n" ] } ], "source": [ "show_trace([101, 100],\n", " lambda x: len(x) >= 2 and x[0] > x[1],\n", " multicourse_shrink2)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Armed with this example we can also show an example where the previous one is wrong because a later simplification unlocks an earlier one because shrinking values allows us to delete more elements:" ] }, { "cell_type": "code", "execution_count": 36, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [5, 5, 5, 5, 5, 5, 5, 5, 5, 5]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✓ [5, 5, 5, 5, 5, 5, 5, 5]\n", "✓ [0, 0, 0, 0, 0, 0, 0, 0]\n", "\n", "2 shrinks with 5 function calls\n" ] } ], "source": [ "show_trace([5] * 10,\n", " lambda x: x and len(x) > max(x),\n", " multicourse_shrink1)" ] }, { "cell_type": "code", "execution_count": 37, "metadata": { "collapsed": true }, "outputs": [], "source": [ "conditions[\"First > Second\"] = lambda xs: len(xs) >= 2 and xs[0] > xs[1]" ] }, { "cell_type": "code", "execution_count": 38, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Note: We modify this to mask off the high bits because otherwise the probability of\n", "# hitting the condition at random is too low.\n", "conditions[\"Size > max & 63\"] = lambda xs: xs and len(xs) > (max(xs) & 63)" ] }, { "cell_type": 
"markdown", "metadata": {}, "source": [ "So what we'll try doing is iterating this to a fixed point and see what happens:" ] }, { "cell_type": "code", "execution_count": 39, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def multicourse_shrink3(ls, constraint):\n", " seen = set()\n", " while True:\n", " old_ls = ls\n", " for shrink in shrinkers_for(ls):\n", " while True:\n", " for s in shrink(ls):\n", " key = tuple(s)\n", " if key in seen:\n", " continue\n", " seen.add(key)\n", " if constraint(s):\n", " ls = s\n", " break\n", " else:\n", " break\n", " if ls == old_ls:\n", " return ls" ] }, { "cell_type": "code", "execution_count": 40, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [101, 100]\n", "✗ [101]\n", "✗ [100]\n", "✗ [100, 100]\n", "✗ [0, 100]\n", "✗ [1, 100]\n", "✗ [3, 100]\n", "✗ [7, 100]\n", "✗ [15, 100]\n", "✗ [31, 100]\n", "✗ [63, 100]\n", "✗ [82, 100]\n", "✗ [91, 100]\n", "✗ [96, 100]\n", "✗ [98, 100]\n", "✗ [99, 100]\n", "✓ [101, 0]\n", "✗ [0]\n", "✗ [0, 0]\n", "✓ [1, 0]\n", "✗ [1]\n", "\n", "2 shrinks with 20 function calls\n" ] } ], "source": [ "show_trace([101, 100],\n", " lambda xs: len(xs) >= 2 and xs[0] > xs[1],\n", " multicourse_shrink3)" ] }, { "cell_type": "code", "execution_count": 41, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [5, 5, 5, 5, 5, 5, 5, 5, 5, 5]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✓ [5, 5, 5, 5, 5, 5, 5, 5]\n", "✓ [5, 5, 5, 5, 5, 5, 5]\n", "✓ [5, 5, 5, 5, 5, 5]\n", "✗ [5, 5, 5, 5, 5]\n", "✓ [0, 0, 0, 0, 0, 0]\n", "✓ [0]\n", "✗ []\n", "\n", "5 shrinks with 10 function calls\n" ] } ], "source": [ "show_trace([5] * 10,\n", " lambda x: x and len(x) > max(x),\n", " multicourse_shrink3)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So that worked. Yay!\n", "\n", "Lets compare how this does to our single pass implementation." 
] }, { "cell_type": "code", "execution_count": 42, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", "\n", "
ConditionSingle passMulti pass
length >= 2 6 6
sum >= 500 35 35
sum >= 3 6 6
At least 10 by 5 107 73
10 distinct elements 623 131
First > Second 1481 1445
Size > max & 63 600 > 5000
" ], "text/plain": [ "" ] }, "execution_count": 42, "metadata": {}, "output_type": "execute_result" } ], "source": [ "compare_simplifiers([\n", " (\"Single pass\", partial(greedy_shrink_with_dedupe,\n", " shrink=shrink6)),\n", " (\"Multi pass\", multicourse_shrink3)\n", " \n", "])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "So the answer is generally favourably but *ouch* that last one.\n", "\n", "What's happening there is that because later shrinks are opening up potentially very large improvements accessible to the lower shrinks, the original greedy algorithm can exploit that much better, while the multi pass algorithm spends a lot of time in the later stages with their incremental shrinks.\n", "\n", "Lets see another similar example before we try to fix this:" ] }, { "cell_type": "code", "execution_count": 43, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import hashlib\n", "\n", "conditions[\"Messy\"] = lambda xs: hashlib.md5(repr(xs).encode('utf-8')).hexdigest()[0] == '0'" ] }, { "cell_type": "code", "execution_count": 44, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", "\n", "
ConditionSingle passMulti pass
length >= 2 6 6
sum >= 500 35 35
sum >= 3 6 6
At least 10 by 5 107 73
10 distinct elements 623 131
First > Second 1481 1445
Size > max & 63 600 > 5000
Messy 1032 > 5000
" ], "text/plain": [ "" ] }, "execution_count": 44, "metadata": {}, "output_type": "execute_result" } ], "source": [ "compare_simplifiers([\n", " (\"Single pass\", partial(greedy_shrink_with_dedupe,\n", " shrink=shrink6)),\n", " (\"Multi pass\", multicourse_shrink3)\n", " \n", "])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This one is a bit different in that the problem is not that the structure is one we're ill suited to exploiting, it's that there is no structure at all so we have no hope of exploiting it. Literally any change at all will unlock earlier shrinks we could have done.\n", "\n", "What we're going to try to do is hybridize the two approaches. If we notice we're performing an awful lot of shrinks we can take that as a hint that we should be trying again from earlier stages.\n", "\n", "Here is our first approach. We simply restart the whole process every five shrinks:" ] }, { "cell_type": "code", "execution_count": 45, "metadata": { "collapsed": true }, "outputs": [], "source": [ "MAX_SHRINKS_PER_RUN = 2\n", "\n", "\n", "def multicourse_shrink4(ls, constraint):\n", " seen = set()\n", " while True:\n", " old_ls = ls\n", " shrinks_this_run = 0\n", " for shrink in shrinkers_for(ls):\n", " while shrinks_this_run < MAX_SHRINKS_PER_RUN:\n", " for s in shrink(ls):\n", " key = tuple(s)\n", " if key in seen:\n", " continue\n", " seen.add(key)\n", " if constraint(s):\n", " shrinks_this_run += 1\n", " ls = s\n", " break\n", " else:\n", " break\n", " if ls == old_ls:\n", " return ls" ] }, { "cell_type": "code", "execution_count": 46, "metadata": { "collapsed": false }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", "\n", "
ConditionSingle passMulti passMulti pass with restart
length >= 2 6 6 6
sum >= 500 35 35 35
sum >= 3 6 6 6
At least 10 by 5 107 73 90
10 distinct elements 623 131 396
First > Second 1481 1445 1463
Size > max & 63 600 > 5000 > 5000
Messy 1032 > 5000 1423
" ], "text/plain": [ "" ] }, "execution_count": 46, "metadata": {}, "output_type": "execute_result" } ], "source": [ "compare_simplifiers([\n", " (\"Single pass\", partial(greedy_shrink_with_dedupe,\n", " shrink=shrink6)),\n", " (\"Multi pass\", multicourse_shrink3),\n", " (\"Multi pass with restart\", multicourse_shrink4) \n", " \n", "])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "That works OK, but it's pretty unsatisfying as it loses us most of the benefits of the multi pass shrinking - we're now at most twice as good as the greedy one.\n", "\n", "So what we're going to do is bet on the multi pass working and then gradually degrade to the greedy algorithm as it fails to work." ] }, { "cell_type": "code", "execution_count": 47, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def multicourse_shrink5(ls, constraint):\n", " seen = set()\n", " max_shrinks_per_run = 10\n", " while True:\n", " shrinks_this_run = 0\n", " for shrink in shrinkers_for(ls):\n", " while shrinks_this_run < max_shrinks_per_run:\n", " for s in shrink(ls):\n", " key = tuple(s)\n", " if key in seen:\n", " continue\n", " seen.add(key)\n", " if constraint(s):\n", " shrinks_this_run += 1\n", " ls = s\n", " break\n", " else:\n", " break\n", " if max_shrinks_per_run > 1:\n", " max_shrinks_per_run -= 2\n", " if not shrinks_this_run:\n", " return ls" ] }, { "cell_type": "code", "execution_count": 48, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "✓ [5, 5, 5, 5, 5, 5, 5, 5, 5, 5]\n", "✗ [5]\n", "✗ [5, 5]\n", "✗ [5, 5, 5, 5]\n", "✓ [5, 5, 5, 5, 5, 5, 5, 5]\n", "✓ [5, 5, 5, 5, 5, 5, 5]\n", "✓ [5, 5, 5, 5, 5, 5]\n", "✗ [5, 5, 5, 5, 5]\n", "✓ [0, 0, 0, 0, 0, 0]\n", "✓ [0]\n", "✗ []\n", "\n", "5 shrinks with 10 function calls\n" ] } ], "source": [ "show_trace([5] * 10,\n", " lambda x: x and len(x) > max(x),\n", " multicourse_shrink5)" ] }, { "cell_type": "code", "execution_count": 49, "metadata": { "collapsed": false }, 
"outputs": [ { "data": { "text/html": [ "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", " \n", "\n", "\n", "\n", "
ConditionSingle passMulti passMulti pass with restartMulti pass with variable restart
length >= 2 6 6 6 6
sum >= 500 35 35 35 35
sum >= 3 6 6 6 6
At least 10 by 5 107 73 90 73
10 distinct elements 623 131 396 212
First > Second 1481 1445 1463 1168
Size > max & 63 600 > 5000 > 5000 1002
Messy 1032 > 5000 1423 824
" ], "text/plain": [ "" ] }, "execution_count": 49, "metadata": {}, "output_type": "execute_result" } ], "source": [ "compare_simplifiers([\n", " (\"Single pass\", partial(greedy_shrink_with_dedupe,\n", " shrink=shrink6)),\n", " (\"Multi pass\", multicourse_shrink3), \n", " (\"Multi pass with restart\", multicourse_shrink4),\n", " (\"Multi pass with variable restart\", multicourse_shrink5) \n", "])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This is now more or less the current state of the art (it's actually a bit different from the Hypothesis state of the art at the time of this writing. I'm planning to merge some of the things I figured out in the course of writing this back in). We've got something that is able to adaptively take advantage of structure where it is present, but degrades reasonably gracefully back to the more aggressive version that works better in unstructured examples.\n", "\n", "Surprisingly, on some examples it seems to even be best of all of them. I think that's more coincidence than truth though." 
] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.4.3" } }, "nbformat": 4, "nbformat_minor": 0 } hypothesis-hypothesis-python-4.36.2/pytest.ini000066400000000000000000000001311354103617500215520ustar00rootroot00000000000000[pytest] addopts=--strict --tb=native -p pytester --runpytest=subprocess --durations=20 hypothesis-hypothesis-python-4.36.2/requirements/000077500000000000000000000000001354103617500222515ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/requirements/coverage.in000066400000000000000000000000471354103617500243750ustar00rootroot00000000000000coverage lark-parser numpy pandas pytz hypothesis-hypothesis-python-4.36.2/requirements/coverage.txt000066400000000000000000000004721354103617500246100ustar00rootroot00000000000000# # This file is autogenerated by pip-compile # To update, run: # # pip-compile --output-file=requirements/coverage.txt requirements/coverage.in # coverage==4.5.4 lark-parser==0.7.5 numpy==1.17.2 pandas==0.25.1 python-dateutil==2.8.0 # via pandas pytz==2019.2 six==1.12.0 # via python-dateutil hypothesis-hypothesis-python-4.36.2/requirements/py2.txt000066400000000000000000000014761354103617500235340ustar00rootroot00000000000000# # This file is no longer being updated, as several of # the packages below no longer support Python 2. 
# apipkg==1.5 # via execnet atomicwrites==1.3.0 # via pytest attrs==19.1.0 colorama==0.4.1 # via pytest execnet==1.6.0 # via pytest-xdist importlib-metadata==0.18 # via pluggy, pytest mock==3.0.5 more-itertools==5.0.0 numpy==1.16.4 # last compatible with Python 2 packaging==19.0 # via pytest pluggy==0.12.0 # via pytest py==1.8.0 # via pytest pyparsing==2.4.0 # via packaging pytest-forked==1.0.2 # via pytest-xdist pytest-xdist==1.29.0 pytest==4.6.4 six==1.12.0 # via mock, more-itertools, packaging, pytest-xdist wcwidth==0.1.7 # via pytest zipp==0.5.1 # via importlib-metadata hypothesis-hypothesis-python-4.36.2/requirements/test.in000066400000000000000000000000321354103617500235530ustar00rootroot00000000000000attrs pytest pytest-xdist hypothesis-hypothesis-python-4.36.2/requirements/test.txt000066400000000000000000000013651354103617500237760ustar00rootroot00000000000000# # This file is autogenerated by pip-compile # To update, run: # # pip-compile --output-file=requirements/test.txt requirements/test.in # apipkg==1.5 # via execnet atomicwrites==1.3.0 # via pytest attrs==19.1.0 execnet==1.7.1 # via pytest-xdist importlib-metadata==0.23 # via pluggy, pytest more-itertools==7.2.0 # via pytest, zipp packaging==19.1 # via pytest pluggy==0.13.0 # via pytest py==1.8.0 # via pytest pyparsing==2.4.2 # via packaging pytest-forked==1.0.2 # via pytest-xdist pytest-xdist==1.29.0 pytest==5.1.2 six==1.12.0 # via packaging, pytest-xdist wcwidth==0.1.7 # via pytest zipp==0.6.0 # via importlib-metadata hypothesis-hypothesis-python-4.36.2/requirements/tools.in000066400000000000000000000004041354103617500237370ustar00rootroot00000000000000attrs autoflake bandit black coverage django dpcontracts flake8 flake8-alfred flake8-docstrings ipython isort lark-parser mypy numpy pip-tools pylint pytest python-dateutil pyupgrade pyupio requests restructuredtext-lint sphinx sphinx-rtd-theme toml tox twine 
hypothesis-hypothesis-python-4.36.2/requirements/tools.txt000066400000000000000000000100701354103617500241500ustar00rootroot00000000000000# # This file is autogenerated by pip-compile # To update, run: # # pip-compile --output-file=requirements/tools.txt requirements/tools.in # alabaster==0.7.12 # via sphinx appdirs==1.4.3 # via black astroid==2.2.5 # via pylint atomicwrites==1.3.0 # via pytest attrs==19.1.0 autoflake==1.3.1 babel==2.7.0 # via sphinx backcall==0.1.0 # via ipython bandit==1.6.2 black==19.3b0 bleach==3.1.0 # via readme-renderer certifi==2019.9.11 # via requests chardet==3.0.4 # via requests click==7.0 # via black, pip-tools, pyupio, safety coverage==4.5.4 decorator==4.4.0 # via ipython, traitlets deprecated==1.2.6 # via pygithub django==2.2.5 docutils==0.15.2 # via readme-renderer, restructuredtext-lint, sphinx dparse==0.4.1 # via pyupio, safety dpcontracts==0.6.0 entrypoints==0.3 # via flake8 filelock==3.0.12 # via tox flake8-alfred==1.1.1 flake8-docstrings==1.4.0 flake8==3.7.8 gitdb2==2.0.5 # via gitpython gitpython==3.0.2 # via bandit idna==2.8 # via requests imagesize==1.1.0 # via sphinx importlib-metadata==0.23 # via pluggy, pytest, tox ipython-genutils==0.2.0 # via traitlets ipython==7.8.0 isort==4.3.21 jedi==0.15.1 # via ipython jinja2==2.10.1 # via pyupio, sphinx lark-parser==0.7.5 lazy-object-proxy==1.4.2 # via astroid markupsafe==1.1.1 # via jinja2 mccabe==0.6.1 # via flake8, pylint more-itertools==7.2.0 # via pytest, zipp mypy-extensions==0.4.1 # via mypy mypy==0.720 numpy==1.17.2 packaging==19.1 # via dparse, pytest, pyupio, safety, sphinx, tox parso==0.5.1 # via jedi pbr==5.4.3 # via stevedore pexpect==4.7.0 # via ipython pickleshare==0.7.5 # via ipython pip-tools==4.1.0 pkginfo==1.5.0.1 # via twine pluggy==0.13.0 # via pytest, tox prompt-toolkit==2.0.9 # via ipython ptyprocess==0.6.0 # via pexpect py==1.8.0 # via pytest, tox pycodestyle==2.5.0 # via flake8 pydocstyle==4.0.1 # via flake8-docstrings pyflakes==2.1.1 # via autoflake, flake8 
pygithub==1.43.8 # via pyupio pygments==2.4.2 # via ipython, readme-renderer, sphinx pyjwt==1.7.1 # via pygithub pylint==2.3.1 pyparsing==2.4.2 # via packaging pytest==5.1.2 python-dateutil==2.8.0 python-gitlab==1.11.0 # via pyupio pytz==2019.2 # via babel, django pyupgrade==1.23.0 pyupio==1.0.2 pyyaml==5.1.2 # via bandit, dparse, pyupio readme-renderer==24.0 # via twine requests-toolbelt==0.9.1 # via twine requests==2.22.0 restructuredtext-lint==1.3.0 safety==1.8.5 # via pyupio six==1.12.0 # via astroid, bandit, bleach, dparse, packaging, pip-tools, prompt-toolkit, python-dateutil, python-gitlab, pyupio, readme-renderer, stevedore, tox, traitlets smmap2==2.0.5 # via gitdb2 snowballstemmer==1.9.1 # via pydocstyle, sphinx sphinx-rtd-theme==0.4.3 sphinx==2.2.0 sphinxcontrib-applehelp==1.0.1 # via sphinx sphinxcontrib-devhelp==1.0.1 # via sphinx sphinxcontrib-htmlhelp==1.0.2 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx sphinxcontrib-qthelp==1.0.2 # via sphinx sphinxcontrib-serializinghtml==1.1.3 # via sphinx sqlparse==0.3.0 # via django stevedore==1.31.0 # via bandit tokenize-rt==3.2.0 # via pyupgrade toml==0.10.0 tox==3.14.0 tqdm==4.35.0 # via pyupio, twine traitlets==4.3.2 # via ipython twine==1.14.0 typed-ast==1.4.0 # via astroid, mypy typing-extensions==3.7.4 # via mypy urllib3==1.25.3 # via requests virtualenv==16.7.5 # via tox wcwidth==0.1.7 # via prompt-toolkit, pytest webencodings==0.5.1 # via bleach wrapt==1.11.2 # via astroid, deprecated zipp==0.6.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: # setuptools==41.2.0 # via ipython, safety, sphinx, twine hypothesis-hypothesis-python-4.36.2/requirements/typing.in000066400000000000000000000000071354103617500241100ustar00rootroot00000000000000typing hypothesis-hypothesis-python-4.36.2/requirements/typing.txt000066400000000000000000000002421354103617500243220ustar00rootroot00000000000000# # This file is autogenerated by pip-compile # To update, run: # 
# pip-compile --output-file=requirements/typing.txt requirements/typing.in # typing==3.7.4.1 hypothesis-hypothesis-python-4.36.2/scripts/000077500000000000000000000000001354103617500212155ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/scripts/install.ps1000066400000000000000000000133351354103617500233150ustar00rootroot00000000000000# Sample script to install Python and pip under Windows # Authors: Olivier Grisel, Jonathan Helmus and Kyle Kastner # License: CC0 1.0 Universal: https://creativecommons.org/publicdomain/zero/1.0/ $MINICONDA_URL = "https://repo.continuum.io/miniconda/" $BASE_URL = "https://www.python.org/ftp/python/" $GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py" $GET_PIP_PATH = "C:\get-pip.py" function DownloadPython ($python_version, $platform_suffix) { $webclient = New-Object System.Net.WebClient $filename = "python-" + $python_version + $platform_suffix + ".msi" $url = $BASE_URL + $python_version + "/" + $filename $basedir = $pwd.Path + "\" $filepath = $basedir + $filename if (Test-Path $filename) { Write-Host "Reusing" $filepath return $filepath } # Download and retry up to 3 times in case of network transient errors. Write-Host "Downloading" $filename "from" $url $retry_attempts = 2 for($i=0; $i -lt $retry_attempts; $i++){ try { $webclient.DownloadFile($url, $filepath) break } Catch [Exception]{ Start-Sleep 1 } } if (Test-Path $filepath) { Write-Host "File saved at" $filepath } else { # Retry once to get the error message if any at the last try $webclient.DownloadFile($url, $filepath) } return $filepath } function InstallPython ($python_version, $architecture, $python_home) { Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home if (Test-Path $python_home) { Write-Host $python_home "already exists, skipping." 
return $false } if ($architecture -eq "32") { $platform_suffix = "" } else { $platform_suffix = ".amd64" } $msipath = DownloadPython $python_version $platform_suffix Write-Host "Installing" $msipath "to" $python_home $install_log = $python_home + ".log" $install_args = "/qn /log $install_log /i $msipath TARGETDIR=$python_home" $uninstall_args = "/qn /x $msipath" RunCommand "msiexec.exe" $install_args if (-not(Test-Path $python_home)) { Write-Host "Python seems to be installed else-where, reinstalling." RunCommand "msiexec.exe" $uninstall_args RunCommand "msiexec.exe" $install_args } if (Test-Path $python_home) { Write-Host "Python $python_version ($architecture) installation complete" } else { Write-Host "Failed to install Python in $python_home" Get-Content -Path $install_log Exit 1 } } function RunCommand ($command, $command_args) { Write-Host $command $command_args Start-Process -FilePath $command -ArgumentList $command_args -Wait -Passthru } function InstallPip ($python_home) { $pip_path = $python_home + "\Scripts\pip.exe" $python_path = $python_home + "\python.exe" if (-not(Test-Path $pip_path)) { Write-Host "Installing pip..." $webclient = New-Object System.Net.WebClient $webclient.DownloadFile($GET_PIP_URL, $GET_PIP_PATH) Write-Host "Executing:" $python_path $GET_PIP_PATH Start-Process -FilePath "$python_path" -ArgumentList "$GET_PIP_PATH" -Wait -Passthru } else { Write-Host "pip already installed." } } function DownloadMiniconda ($python_version, $platform_suffix) { $webclient = New-Object System.Net.WebClient $filename = "Miniconda-3.5.5-Windows-" + $platform_suffix + ".exe" $url = $MINICONDA_URL + $filename $basedir = $pwd.Path + "\" $filepath = $basedir + $filename if (Test-Path $filename) { Write-Host "Reusing" $filepath return $filepath } # Download and retry up to 3 times in case of network transient errors. 
Write-Host "Downloading" $filename "from" $url $retry_attempts = 2 for($i=0; $i -lt $retry_attempts; $i++){ try { $webclient.DownloadFile($url, $filepath) break } Catch [Exception]{ Start-Sleep 1 } } if (Test-Path $filepath) { Write-Host "File saved at" $filepath } else { # Retry once to get the error message if any at the last try $webclient.DownloadFile($url, $filepath) } return $filepath } function InstallMiniconda ($python_version, $architecture, $python_home) { Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home if (Test-Path $python_home) { Write-Host $python_home "already exists, skipping." return $false } if ($architecture -eq "32") { $platform_suffix = "x86" } else { $platform_suffix = "x86_64" } $filepath = DownloadMiniconda $python_version $platform_suffix Write-Host "Installing" $filepath "to" $python_home $install_log = $python_home + ".log" $args = "/S /D=$python_home" Write-Host $filepath $args Start-Process -FilePath $filepath -ArgumentList $args -Wait -Passthru if (Test-Path $python_home) { Write-Host "Python $python_version ($architecture) installation complete" } else { Write-Host "Failed to install Python in $python_home" Get-Content -Path $install_log Exit 1 } } function InstallMinicondaPip ($python_home) { $pip_path = $python_home + "\Scripts\pip.exe" $conda_path = $python_home + "\Scripts\conda.exe" if (-not(Test-Path $pip_path)) { Write-Host "Installing pip..." $args = "install --yes pip" Write-Host $conda_path $args Start-Process -FilePath "$conda_path" -ArgumentList $args -Wait -Passthru } else { Write-Host "pip already installed." 
} } function main () { InstallPython $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON InstallPip $env:PYTHON } main hypothesis-hypothesis-python-4.36.2/scripts/run_with_env.cmd000066400000000000000000000034641354103617500244200ustar00rootroot00000000000000:: To build extensions for 64 bit Python 3, we need to configure environment :: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of: :: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1) :: :: To build extensions for 64 bit Python 2, we need to configure environment :: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of: :: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0) :: :: 32 bit builds do not require specific environment configurations. :: :: Note: this script needs to be run with the /E:ON and /V:ON flags for the :: cmd interpreter, at least for (SDK v7.0) :: :: More details at: :: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows :: https://stackoverflow.com/a/13751649/163740 :: :: Author: Olivier Grisel :: License: CC0 1.0 Universal: https://creativecommons.org/publicdomain/zero/1.0/ @ECHO OFF SET COMMAND_TO_RUN=%* SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows SET MAJOR_PYTHON_VERSION="%PYTHON_VERSION:~0,1%" IF %MAJOR_PYTHON_VERSION% == "2" ( SET WINDOWS_SDK_VERSION="v7.0" ) ELSE IF %MAJOR_PYTHON_VERSION% == "3" ( SET WINDOWS_SDK_VERSION="v7.1" ) ELSE ( ECHO Unsupported Python version: "%MAJOR_PYTHON_VERSION%" EXIT 1 ) IF "%PYTHON_ARCH%"=="64" ( ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %MAJOR_PYTHON_VERSION% on a 64 bit architecture SET DISTUTILS_USE_SDK=1 SET MSSdk=1 "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION% "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release ECHO Executing: %COMMAND_TO_RUN% call %COMMAND_TO_RUN% || EXIT 1 ) ELSE ( ECHO Using default MSVC build environment for 32 bit architecture ECHO Executing: 
%COMMAND_TO_RUN% call %COMMAND_TO_RUN% || EXIT 1 ) hypothesis-hypothesis-python-4.36.2/secrets.tar.enc000066400000000000000000000240201354103617500224500ustar00rootroot00000000000000vV^pD4hL"Ѷnڲ+Csj:4T*8Hb{;=zGA O|uz:,S+/Xv1w ~+H:jقs%!J! E#pKxo0;_m Й XF*Y.n{2O,U%gt\5PM#-Wiht|C:Yvk[ Aظ%7=ј:Wjlk4D gߗ$M\74U|>YZwn;SE.ECH&`Br+@}ox4OX TG7b K#?Ay ~w&gc{#\Ր05TQP g7[#VKvp#:TftMd$ ]>JGq!:~TDB9xaf3ƀo;6.~ T>]/XOF*6o_!FAK%AMke]&%tʏ=+Uwq{\gwsHHSNaB̩ef^_t110%+تӻsN x" 4)EŴU.xzҢp%sr2{zz N'qP9}׵wU{EpG.Q@z7in-Yn9$%rHҲFtYv% -l)to|) ٩Gm"&=LWTgZ}~׭4+ZYP`Av)B e7ޤFIRG(m󗏪8YElT\ 9?g12PS \ (E:{MV_W+(F%m"e8Ү^4h?$kށoJu}H-E9,ёaӼN l4Z/PJD=% THT X˯EgchGf@M,AM@N*ԘE6i+/\Uk,;(>>F0 -oXth'jW%Ҍ{0"IM~Bj&SnoJ붷LC׿EvR .DEl:/̕n<ق6Je߽-kH bR ɮ3I@mD\Ys/uNW%kQߜRfҮ]:آ$ͷZ%eacV& Gm5Ax&ќf]uW[a͑V[->o|r ] 9 \3rutAW[l ˽M:bJ|PrclLҳ9O{j8%2 {\h&eniZ|| mϭ%ͱ1zĭCJ'DcPq&rJ=FſNvdUmC`pfe?yWή5@/XRadHv8gN܂Gh1iEJE&D, /!,dqbYukTyR3l[KHz(," n^]vcl)Ttf+\?#|Z~7u ؔAZ=T:Yx%%nsc*5q*y(AG@mLoѧk ܝ}h 5*ύhumk<3TӃ)l|=<*V][ [oiڎV y>MD j{VAxFvT߰B)oԍ5EyΉzm׸k'? +odZcMmR!iAqmý5(=zؤP}[ %KoA0D$DV{Z‹ƀ3#܊0|5P~NxAQz݇pyT%80YUxv{6` {#=&>g)C 9~AMgd]tpqd&s"j/V(~btݺQFO 1~_zo+c!Y+ϫbs -^\Vx>mLD?a Y8U8dEۮ7z9#_o{IH%! 'مGQĤ?FidTR= Hp=qo$9wr_|]RKdR 3#>k0 ޖڝ涆Ӌ!łaSclM*W^VQ1ƞv]D!as׭Ap\b'NCe)Ljz&O7OmG/*8_*XYLh<-/x8TvXLCr 1qZ@Ҕv5BZtXtۨ\Dci]/ZfOBՄ\"} _R{ĦP3.Tz}S֪ȓochr;ƽuKi/ OjqBd/?qRDKׁ;FqC5C1, `;aeX h[ kU% Ps,y:YC\duI4{H9b"FМ383Y3z&۲AZ8נg#BkmCÒ57Bjq%lTo?]yjssj>B0,~!yn.tU"@"B[6r跲}B'Ʋ*&AGY`U &s > ~?'1B 0& &Ck]?֕tu,<]NVMϐW(sǑ>N/M C ,a_d# >C DžֽČSCow|JbIƨWJhޣ'(HrD=J_U[mƽ ̍AJSuiJD`3g #ސ!5tpe;g) 9nG7( /&__ZkɁpF:8ua'N8W甊}qlەACvJ:KTAbl (Uhy=j. ̿higԥ6Lt:GpՀsw{xr6YU|8|Z˧'U: [ͩDrDV/</埵 0QYUnz~m+@PXb}PC)&pW7[Zen1GPfQA0CH = ; w. GRm^Yppom^LM_8vfJ0pKCsE)XBA$X]&C]q>X&#< X羓ɥmQZ_]lIF+dT4>iaSF4̑zRNS!ǣi5 KZUga /w'.g +$܂poAΌSy<.4pנFk^)Hk{'cK5G$B lG]eGJdm~@ }qyX3HĻ|H٠(Q{QD_St5-;W 1}_0h}V*f=kxfuTTR~r'XL(vS%ܿ%I M ےc>gEˏe"ךAE5ɽ,+1FU+,^$o u[! 
7@&F#zN@t.JA[$zjȚ2fZhٗ3q@ElA0VjE8<]lg!:q1[Lb*11A8SYkE%M$F St+@0X$zɪ86ă"X[6qY>9w{T-8b*S$aMR@$Q-'_eAr]2FB1q^BQg"[K$xh@{m4-<2 tZ3i@!^<Ÿ3{I˧pnD/y ޭ#Gk& (]wUFƁBSwjK27k1 ,hsMU谼P_JR㱐k@\3›2zjxԩ>ѕC鼩lVJѾt yd bﮘlb fN҇R wTv)s=;OB!P\ĊM\/ t#]AT@D嚵 :Dک>LZ~A%XsJp}|Չo-|`W4պi r{6J֋b;<"%eHAUwq9k>ZO{~Η%d'GM"lji3i@߃tFY: ɛݓ$٨ެt" !i4Zx9}qL ߁E`Fi0% bL ,1Td3dLC*!uH/KP sC ʦԧtq ZxSX}1OȢ{w.}:ICe f鈅%d$?pa\ 9%pߍ݈a:S:PsV_ ݪN8kO>פ^P,&F60Gm?Wg#@M BFhe52qޅXYânϖ_U۟)Rﳲ_($wj q)dJ?ocA9(C&CGkk:~i9[Is!6?lN~ m܊- ѩLbwL.!8ֿuԇYqL1`4L or8IٸSkz/̄[O,D`U ' :O\oXY^9">halӐLIJZp\Z PD,,#J^F,s4Z_?l0pV=/"/A ,gϴ93 ~9MHv[f-JƲ^oTIP]6wmw")4܄ :XK^1Sa:Bw@,c? @R&K˪0Q 吰2lv`nozO[2Z' *R?-puJ^/Z Odʐ3i.E!PB1(UX.JxyWX晶,2}ZˊW 0`8Ah } K>q"I΄VM857l`@9XD.׆Z4չfmc۾5ŁաW6`M@0 kSfE~@͹l'5PÞɝO2k\z@s7iQ~<4Up9e_>9"5^bMo.1)$Fdlִu@]`-B^i5F(`a^q[ Q݃W$ϋÿA! S̺dhtQYqcmOFCoXL9ID+Ӱ.sc?ݕL7CYh~/ݎ~λBـ֯WxCĽTBxtUW&0X(JfI2xMjF0.'1!Ih t] ^W,3"4>}Fȯy w*yrNzu͓S`xHxʗQ[-atOwWnzGNG 9uz/YE캕UVؠا:ԗ(q5:oJϩD }^.U3=޺P8$Vc > J&UJi*!$L˷fc\H);p!4YPN{1!cSU(0*`iLµw_x1$t!  Aq C J8_d.KWN:u2~j%V&sAxݎy95nɻ/&"Dbi,SQv[P |cn"o*gs؎ES_]o"{Nǎl|APeu}6 ,:tBWmjg+Jgq0(5.o־,7~"M`j*%X$;4vR |wɊdw*@BQ gf-,B4"QU ?j53jT^QMq[>MMI,ݣ0/ &Pm' m{Eb;D-nym9^Y9er :O=R^rl*,Z7U8 Ȭ (3x'lX eNqD^-N-Mp9l`0k5 7 GGBͭL43]휗VN<58pEF4Z979*ՆƂ C!Hq6 PՅ`?̹tC`1]=`E,SѠGfJq6=hypothesis-hypothesis-python-4.36.2/tooling/000077500000000000000000000000001354103617500212015ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/tooling/README.rst000066400000000000000000000003511354103617500226670ustar00rootroot00000000000000======================== Hypothesis Build Tooling ======================== This is a piece of software for managing Hypothesis's build tasks, releases, etc. It's very Hypothesis specific, though it may become less so in the future. 
hypothesis-hypothesis-python-4.36.2/tooling/scripts/000077500000000000000000000000001354103617500226705ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/tooling/scripts/common.sh000066400000000000000000000020501354103617500245110ustar00rootroot00000000000000#!/usr/bin/env bash # This file is not really a script but is intended for sourcing from other # scripts so that they can share a common set of paths conveniently. set -o errexit set -o nounset HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" ROOT="$(git -C "$HERE" rev-parse --show-toplevel)" export ROOT export BUILD_RUNTIMES=${BUILD_RUNTIMES:-$HOME/.cache/hypothesis-build-runtimes} export BASE="$BUILD_RUNTIMES" export PYENV="$BASE/pyenv" export SNAKEPIT="$BASE/python-versions/" # Note: Deliberately ignoring BUILD_RUNTIMES configuration because we don't # want this to go in cache, because it takes up a huge amount of space and # slows everything down! export VIRTUALENVS="${TMPDIR:-/tmp}/.hypothesis-runtimes/virtualenvs/" export RBENV_VERSION="2.5.1" export RBENV_ROOT="$BASE/.rbenv" export INSTALLED_RUBY_DIR="$RBENV_ROOT/versions/$RBENV_VERSION/" export GEM_HOME="$INSTALLED_RUBY_DIR" export GEM_PATH="$GEM_HOME" export PATH="$INSTALLED_RUBY_DIR/bin:$HOME/.cargo/bin:$PATH" pythonloc() { VERSION="$1" echo "$SNAKEPIT/$VERSION" } hypothesis-hypothesis-python-4.36.2/tooling/scripts/ensure-python.sh000077500000000000000000000040411354103617500260460ustar00rootroot00000000000000#!/usr/bin/env bash set -o errexit set -o nounset set -x HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # shellcheck source=tooling/scripts/common.sh source "$HERE/common.sh" # This is to guard against multiple builds in parallel. The various installers will tend # to stomp all over each other if you do this and they haven't previously successfully # succeeded. We use a lock file to block progress so only one install runs at a time. 
# This script should be pretty fast once files are cached, so the loss of concurrency # is not a major problem. # This should be using the lockfile command, but that's not available on the # containerized travis and we can't install it without sudo. # It is unclear if this is actually useful. I was seeing behaviour that suggested # concurrent runs of the installer, but I can't seem to find any evidence of this lock # ever not being acquired. VERSION="$1" TARGET=$(pythonloc "$VERSION") if [ ! -e "$TARGET/bin/python" ] ; then mkdir -p "$BASE" LOCKFILE="$BASE/.install-lockfile" while true; do if mkdir "$LOCKFILE" 2>/dev/null; then echo "Successfully acquired installer." break else echo "Failed to acquire lock. Is another installer running? Waiting a bit." fi sleep $(( ( RANDOM % 10 ) + 1 )).$(( RANDOM % 100 ))s if (( $(date '+%s') > 300 + $(stat --format=%X "$LOCKFILE") )); then echo "We've waited long enough" rm -rf "$LOCKFILE" fi done trap 'rm -rf $LOCKFILE' EXIT if [ ! -d "$PYENV/.git" ]; then rm -rf "$PYENV" git clone https://github.com/yyuu/pyenv.git "$PYENV" else back=$PWD cd "$PYENV" git fetch || echo "Update failed to complete. Ignoring" git reset --hard origin/master cd "$back" fi for _ in $(seq 5); do if "$BASE/pyenv/plugins/python-build/bin/python-build" "$VERSION" "$TARGET" ; then exit 0 fi echo "Command failed. Retrying..." sleep $(( ( RANDOM % 10 ) + 1 )).$(( RANDOM % 100 ))s done fi hypothesis-hypothesis-python-4.36.2/tooling/scripts/ensure-rustup.sh000077500000000000000000000004131354103617500260660ustar00rootroot00000000000000#!/usr/bin/env bash set -o errexit set -o nounset if ! command -v rustup > /dev/null ; then curl https://sh.rustup.rs -sSf | sh -s -- -y fi if ! 
rustup show | grep stable > /dev/null ; then rustup install stable fi rustup default stable rustup update stable hypothesis-hypothesis-python-4.36.2/tooling/scripts/install-python.sh000077500000000000000000000054571354103617500262270ustar00rootroot00000000000000#!/usr/bin/env bash # Special license: Take literally anything you want out of this file. I don't # care. Consider it WTFPL licensed if you like. # Basically there's a lot of suffering encoded here that I don't want you to # have to go through and you should feel free to use this to avoid some of # that suffering in advance. set -e set -x # OS X seems to have some weird Localse problems on Travis. This attempts to set # the Locale to known good ones during install env | grep UTF # This is to guard against multiple builds in parallel. The various installers will tend # to stomp all over each other if you do this and they haven't previously successfully # succeeded. We use a lock file to block progress so only one install runs at a time. # This script should be pretty fast once files are cached, so the lost of concurrency # is not a major problem. # This should be using the lockfile command, but that's not available on the # containerized travis and we can't install it without sudo. # Is is unclear if this is actually useful. I was seeing behaviour that suggested # concurrent runs of the installer, but I can't seem to find any evidence of this lock # ever not being acquired. BASE=${BUILD_RUNTIMES-$PWD/.runtimes} mkdir -p "$BASE" LOCKFILE="$BASE/.install-lockfile" while true; do if mkdir "$LOCKFILE" 2>/dev/null; then echo "Successfully acquired installer." break else echo "Failed to acquire lock. Is another installer running? Waiting a bit." fi sleep $(( ( RANDOM % 10 ) + 1 )).$(( RANDOM % 100 ))s if (( $(date '+%s') > 300 + $(stat --format=%X "$LOCKFILE") )); then echo "We've waited long enough" rm -rf "$LOCKFILE" fi done trap 'rm -rf $LOCKFILE' EXIT PYENV=$BASE/pyenv if [ ! 
-d "$PYENV/.git" ]; then rm -rf "$PYENV" git clone https://github.com/yyuu/pyenv.git "$BASE/pyenv" else back=$PWD cd "$PYENV" git fetch || echo "Update failed to complete. Ignoring" git reset --hard origin/master cd "$back" fi SNAKEPIT=$BASE/snakepit install () { VERSION="$1" ALIAS="$2" mkdir -p "$BASE/versions" SOURCE=$BASE/versions/$ALIAS if [ ! -e "$SOURCE" ]; then mkdir -p "$SNAKEPIT" mkdir -p "$BASE/versions" "$BASE/pyenv/plugins/python-build/bin/python-build" "$VERSION" "$SOURCE" fi rm -f "$SNAKEPIT/$ALIAS" mkdir -p "$SNAKEPIT" "$SOURCE/bin/python" -m pip.__main__ install --upgrade pip wheel virtualenv ln -s "$SOURCE/bin/python" "$SNAKEPIT/$ALIAS" } for var in "$@"; do case "${var}" in 2.7) install 2.7.15 python2.7 ;; 2.7.3) install 2.7.3 python2.7.3 ;; 3.5) install 3.5.6 python3.5 ;; 3.6) install 3.6.7 python3.6 ;; 3.7) install 3.7.1 python3.7 ;; pypy) install pypy2.7-5.8.0 pypy ;; pypy3) install pypy3.5-5.10.1 pypy3 ;; esac done hypothesis-hypothesis-python-4.36.2/tooling/scripts/tool-hash.py000077500000000000000000000015261354103617500251470ustar00rootroot00000000000000#!/usr/bin/env python # coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import hashlib import sys if __name__ == "__main__": print(hashlib.sha1(sys.stdin.read().encode("utf-8")).hexdigest()[:10]) hypothesis-hypothesis-python-4.36.2/tooling/setup.py000066400000000000000000000026171354103617500227210ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import os import setuptools def local_file(name): return os.path.relpath(os.path.join(os.path.dirname(__file__), name)) SOURCE = local_file("src") README = local_file("README.rst") setuptools.setup( name="hypothesis-tooling", # We don't actually ship this, it just has a setup.py for convenience. version="0.0.0", author="David R. 
MacIver", author_email="david@drmaciver.com", packages=setuptools.find_packages(SOURCE), package_dir={"": SOURCE}, url=("https://github.com/HypothesisWorks/hypothesis-python/tree/master/tooling"), license="MPL v2", description="A library for property based testing", python_requires=">=3.6", long_description=open(README).read(), ) hypothesis-hypothesis-python-4.36.2/tooling/src/000077500000000000000000000000001354103617500217705ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/tooling/src/hypothesistooling/000077500000000000000000000000001354103617500255635ustar00rootroot00000000000000hypothesis-hypothesis-python-4.36.2/tooling/src/hypothesistooling/__init__.py000066400000000000000000000140671354103617500277040ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os import shlex import subprocess def current_branch(): return ( subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]) .decode("ascii") .strip() ) def tags(): result = [ t.decode("ascii") for t in subprocess.check_output(["git", "tag"]).split(b"\n") ] assert len(set(result)) == len(result) return set(result) ROOT = ( subprocess.check_output( ["git", "-C", os.path.dirname(__file__), "rev-parse", "--show-toplevel"] ) .decode("ascii") .strip() ) REPO_TESTS = os.path.join(ROOT, "whole-repo-tests") PYUP_FILE = os.path.join(ROOT, ".pyup.yml") def hash_for_name(name): return subprocess.check_output(["git", "rev-parse", name]).decode("ascii").strip() def is_ancestor(a, b): check = subprocess.call(["git", "merge-base", "--is-ancestor", a, b]) assert 0 <= check <= 1 return check == 0 def merge_base(a, b): return subprocess.check_output(["git", "merge-base", a, b]).strip() def point_of_divergence(): return merge_base("HEAD", "origin/master") def has_changes(files): command = [ "git", "diff", "--no-patch", "--exit-code", point_of_divergence(), "HEAD", "--", *files, ] return subprocess.call(command) != 0 def has_uncommitted_changes(filename): return subprocess.call(["git", "diff", "--exit-code", filename]) != 0 def last_committer(): out, _ = subprocess.Popen( ["git", "log", "-1", "--pretty=format:%an"], stdout=subprocess.PIPE, universal_newlines=True, ).communicate() return out def git(*args): subprocess.check_call(("git",) + args) TOOLING_COMMITER_NAME = "Travis CI on behalf of David R. 
MacIver" def configure_git(): git("config", "user.name", TOOLING_COMMITER_NAME) git("config", "user.email", "david@drmaciver.com") git("config", "core.sshCommand", "ssh -i deploy_key") git("remote", "add", "ssh-origin", "git@github.com:HypothesisWorks/hypothesis.git") def create_tag(tagname): assert tagname not in tags() git("tag", tagname) def push_tag(tagname): assert_can_release() subprocess.check_call( [ "ssh-agent", "sh", "-c", "ssh-add %s && " % (shlex.quote(DEPLOY_KEY),) + "git push ssh-origin HEAD:master &&" + "git push ssh-origin %s" % (shlex.quote(tagname),), ] ) def assert_can_release(): assert not IS_PULL_REQUEST, "Cannot release from pull requests" assert has_travis_secrets(), "Cannot release without travis secure vars" def has_travis_secrets(): return os.environ.get("TRAVIS_SECURE_ENV_VARS", None) == "true" def modified_files(): files = set() for command in [ [ "git", "diff", "--name-only", "--diff-filter=d", point_of_divergence(), "HEAD", ], ["git", "diff", "--name-only"], ]: diff_output = subprocess.check_output(command).decode("ascii") for l in diff_output.split("\n"): filepath = l.strip() if filepath: assert os.path.exists(filepath), filepath files.add(filepath) return files def all_files(): return [ f for f in subprocess.check_output(["git", "ls-files"]) .decode("ascii") .splitlines() if os.path.exists(f) ] def changed_files_from_master(): """Returns a list of files which have changed between a branch and master.""" files = set() command = ["git", "diff", "--name-only", "HEAD", "master"] diff_output = subprocess.check_output(command).decode("ascii") for line in diff_output.splitlines(): filepath = line.strip() if filepath: files.add(filepath) return files SECRETS_BASE = os.path.join(ROOT, "secrets") SECRETS_TAR = SECRETS_BASE + ".tar" ENCRYPTED_SECRETS = SECRETS_TAR + ".enc" SECRETS = os.path.join(ROOT, "secrets") DEPLOY_KEY = os.path.join(SECRETS, "deploy_key") PYPIRC = os.path.join(SECRETS, ".pypirc") RUBYGEMS_API_KEY = os.path.join(SECRETS, 
"api_key.yaml") CARGO_API_KEY = os.path.join(SECRETS, "cargo-credentials") SECRET_FILES = [DEPLOY_KEY, PYPIRC, RUBYGEMS_API_KEY, CARGO_API_KEY] def decrypt_secrets(): subprocess.check_call( [ "openssl", "aes-256-cbc", "-K", os.environ["encrypted_b8618e5d043b_key"], "-iv", os.environ["encrypted_b8618e5d043b_iv"], "-in", ENCRYPTED_SECRETS, "-out", SECRETS_TAR, "-d", ] ) subprocess.check_call(["tar", "-xvf", SECRETS_TAR], cwd=ROOT) missing_files = [os.path.basename(f) for f in SECRET_FILES if not os.path.exists(f)] assert not missing_files, missing_files os.chmod(DEPLOY_KEY, int("0600", 8)) IS_TRAVIS_PULL_REQUEST = os.environ.get("TRAVIS_EVENT_TYPE") == "pull_request" IS_CIRCLE_PULL_REQUEST = ( os.environ.get("CIRCLE_BRANCH") == "master" and os.environ.get("CI_PULL_REQUESTS", "") != "" ) IS_PULL_REQUEST = IS_TRAVIS_PULL_REQUEST or IS_CIRCLE_PULL_REQUEST def all_projects(): import hypothesistooling.projects.conjecturerust as cr import hypothesistooling.projects.hypothesispython as hp import hypothesistooling.projects.hypothesisruby as hr return [cr, hp, hr] hypothesis-hypothesis-python-4.36.2/tooling/src/hypothesistooling/__main__.py000066400000000000000000000330231354103617500276560ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os import shlex import subprocess import sys from datetime import datetime from glob import glob import hypothesistooling as tools import hypothesistooling.installers as install import hypothesistooling.projects.conjecturerust as cr import hypothesistooling.projects.hypothesispython as hp import hypothesistooling.projects.hypothesisruby as hr import hypothesistooling.releasemanagement as rm from hypothesistooling.scripts import pip_tool TASKS = {} BUILD_FILES = tuple( os.path.join(tools.ROOT, f) for f in ["tooling", "requirements", ".travis.yml", "hypothesis-python/tox.ini"] ) def task(if_changed=()): if isinstance(if_changed, str): if_changed = (if_changed,) def accept(fn): def wrapped(*args, **kwargs): if if_changed and tools.IS_PULL_REQUEST: if not tools.has_changes(if_changed + BUILD_FILES): print( "Skipping task due to no changes in %s" % (", ".join(if_changed),) ) return fn(*args, **kwargs) wrapped.__name__ = fn.__name__ name = fn.__name__.replace("_", "-") if name != "": TASKS[name] = wrapped return wrapped return accept @task() def check_installed(): """No-op task that can be used to test for a successful install (so we don't fail to run if a previous install failed midway).""" @task() def lint(): pip_tool( "flake8", *[f for f in tools.all_files() if f.endswith(".py")], "--config", os.path.join(tools.ROOT, ".flake8"), ) # Check for redefined test functions, where e.g. a copy-pasted definition # will shadow the earlier test and Pytest never sees or executes it. 
pip_tool( "pylint", "--score=n", "--jobs=0", "--disable=all", "--enable=function-redefined", "hypothesis-python/tests/", ) HEAD = tools.hash_for_name("HEAD") MASTER = tools.hash_for_name("origin/master") def do_release(package): if not package.has_release(): print("No release for %s" % (package.__name__,)) return os.chdir(package.BASE_DIR) print("Updating changelog and version") package.update_changelog_and_version() print("Committing changes") rm.commit_pending_release(package) print("Building distribution") package.build_distribution() print("Looks good to release!") tag_name = package.tag_name() print("Creating tag %s" % (tag_name,)) tools.create_tag(tag_name) tools.push_tag(tag_name) print("Uploading distribution") package.upload_distribution() @task() def deploy(): print("Current head: ", HEAD) print("Current master:", MASTER) if not tools.is_ancestor(HEAD, MASTER): print("Not deploying due to not being on master") sys.exit(0) if not tools.has_travis_secrets(): print("Running without access to secure variables, so no deployment") sys.exit(0) print("Decrypting secrets") tools.decrypt_secrets() tools.configure_git() for project in tools.all_projects(): do_release(project) sys.exit(0) CURRENT_YEAR = datetime.utcnow().year HEADER = """ # coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-%(year)s David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER""".strip() % { "year": CURRENT_YEAR } @task() def format(): def should_format_file(path): if "vendor" in path.split(os.path.sep): return False return path.endswith(".py") changed = tools.modified_files() format_all = os.environ.get("FORMAT_ALL", "").lower() == "true" if "requirements/tools.txt" in changed: # We've changed the tools, which includes a lot of our formatting # logic, so we need to rerun formatters. format_all = True files = tools.all_files() if format_all else changed files_to_format = [f for f in sorted(files) if should_format_file(f)] if not files_to_format: return for f in files_to_format: lines = [] with open(f, encoding="utf-8") as o: shebang = None first = True header_done = False for l in o.readlines(): if first: first = False if l[:2] == "#!": shebang = l continue if "END HEADER" in l and not header_done: lines = [] header_done = True else: lines.append(l) source = "".join(lines).strip() with open(f, "w", encoding="utf-8") as o: if shebang is not None: o.write(shebang) o.write("\n") o.write(HEADER) if source: o.write("\n\n") o.write(source) o.write("\n") pip_tool( "autoflake", "--recursive", "--in-place", "--exclude=compat.py", "--remove-all-unused-imports", "--remove-duplicate-keys", "--remove-unused-variables", *files_to_format, ) pip_tool("pyupgrade", "--keep-percent-format", *files_to_format) pip_tool("isort", *files_to_format) pip_tool("black", *files_to_format) VALID_STARTS = ("# coding=utf-8", "#!/usr/bin/env python") @task() def check_format(): format() n = max(map(len, VALID_STARTS)) bad = False for f in tools.all_files(): if not f.endswith(".py"): continue with open(f, "r", encoding="utf-8") as i: start = i.read(n) if not any(start.startswith(s) for s in VALID_STARTS): print("%s has incorrect start %r" % (f, start), file=sys.stderr) bad = True assert not bad check_not_changed() def check_not_changed(): subprocess.check_call(["git", "diff", "--exit-code"]) @task() def compile_requirements(upgrade=False): if upgrade: 
extra = ["--upgrade"] else: extra = [] for f in glob(os.path.join("requirements", "*.in")): base, _ = os.path.splitext(f) pip_tool( "pip-compile", *extra, f, "--output-file", base + ".txt", cwd=tools.ROOT ) @task() def upgrade_requirements(): compile_requirements(upgrade=True) def is_pyup_branch(): if os.environ.get("TRAVIS_EVENT_TYPE") == "pull_request" and os.environ.get( "TRAVIS_PULL_REQUEST_BRANCH", "" ).startswith("pyup-scheduled-update"): return True return ( os.environ.get("Build.SourceBranchName", "").startswith("pyup-scheduled-update") and os.environ.get("System.PullRequest.IsFork") == "False" and os.environ.get("Build.Reason") == "PullRequest" ) def push_pyup_requirements_commit(): """Because pyup updates each package individually, it can create a requirements.txt with an incompatible set of versions. Depending on the changes, pyup might also have introduced whitespace errors. If we've recompiled requirements.txt in Travis and made changes, and this is a PR where pyup is running, push a consistent set of versions as a new commit to the PR. """ if is_pyup_branch(): print("Pushing new requirements, as this is a pyup pull request") print("Decrypting secrets") tools.decrypt_secrets() tools.configure_git() print("Creating commit") tools.git("add", "--update", "requirements") tools.git("commit", "-m", "Bump requirements for pyup pull request") print("Pushing to GitHub") subprocess.check_call( [ "ssh-agent", "sh", "-c", "ssh-add %s && " % (shlex.quote(tools.DEPLOY_KEY),) + "git push ssh-origin HEAD:%s" % (os.environ["TRAVIS_PULL_REQUEST_BRANCH"],), ] ) @task() def check_requirements(): if is_pyup_branch() and tools.last_committer() != tools.TOOLING_COMMITER_NAME: # Recompile to fix broken formatting etc., but ensure there can't be a loop. 
compile_requirements(upgrade=True) if tools.has_uncommitted_changes("requirements"): push_pyup_requirements_commit() raise RuntimeError("Pushed new requirements; check next build.") else: compile_requirements(upgrade=False) @task(if_changed=hp.HYPOTHESIS_PYTHON) def documentation(): try: if hp.has_release(): hp.update_changelog_and_version() hp.build_docs() finally: subprocess.check_call( ["git", "checkout", "docs/changes.rst", "src/hypothesis/version.py"], cwd=hp.HYPOTHESIS_PYTHON, ) def run_tox(task, version): python = install.python_executable(version) # Create a version of the name that tox will pick up for the correct # interpreter alias. linked_version = os.path.basename(python) + ALIASES[version] try: os.symlink(python, linked_version) except FileExistsError: pass env = dict(os.environ) python = install.python_executable(version) env["PATH"] = os.path.dirname(python) + ":" + env["PATH"] print(env["PATH"]) pip_tool("tox", "-e", task, env=env, cwd=hp.HYPOTHESIS_PYTHON) PY27 = "2.7.14" PY35 = "3.5.5" PY36 = "3.6.8" PY37 = "3.7.0" PYPY2 = "pypy2.7-5.10.0" PYPY3 = "pypy3.5-5.10.1" @task() def install_core(): install.python_executable(PY27) install.python_executable(PY36) ALIASES = {PYPY2: "pypy", PYPY3: "pypy3"} for n in [PY27, PY35, PY36, PY37]: major, minor, patch = n.split(".") ALIASES[n] = "python%s.%s" % (major, minor) python_tests = task( if_changed=( hp.PYTHON_SRC, hp.PYTHON_TESTS, os.path.join(hp.HYPOTHESIS_PYTHON, "scripts"), ) ) @python_tests def check_py27(): run_tox("py27-full", PY27) @python_tests def check_py35(): run_tox("py35-full", PY35) @python_tests def check_py36(): run_tox("py36-full", PY36) @python_tests def check_py37(): run_tox("py37-full", PY37) @python_tests def check_pypy(): run_tox("pypy-full", PYPY2) @python_tests def check_pypy3(): run_tox("pypy3-full", PYPY3) @python_tests def check_py27_typing(): run_tox("py27typing", PY27) @python_tests def check_pypy_with_tracer(): run_tox("pypy-with-tracer", PYPY2) def standard_tox_task(name): 
def standard_tox_task(name):
    # Register a "check-<name>" build task that runs the tox environment of
    # the same name under the default Python.  The lambda closes over the
    # ``name`` parameter of this function, so each registered task keeps its
    # own value (no late-binding problem as there would be in a bare loop).
    TASKS["check-" + name] = python_tests(lambda: run_tox(name, PY36))
) sys.exit(1) try: TASKS[task_to_run](*args) except subprocess.CalledProcessError as e: sys.exit(e.returncode) hypothesis-hypothesis-python-4.36.2/tooling/src/hypothesistooling/installers.py000066400000000000000000000071211354103617500303160ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """Module for obtaining various versions of Python. Currently this is a thin shim around pyenv, but it would be nice to have this work on Windows as well by using Anaconda (as our build already does). 
def __python_executable(version):
    # Layout is fixed by the ensure-python.sh script: one interpreter per
    # version directory inside the snakepit.
    return os.path.join(scripts.SNAKEPIT, version, "bin", "python")


def python_executable(version):
    """Return the path to a python binary for *version*, installing the
    interpreter first if necessary."""
    ensure_python(version)
    return __python_executable(version)


# Versions already verified as installed during this process, so we only
# shell out to the install script once per version.
PYTHONS = set()


def ensure_python(version):
    """Install *version* (via ensure-python.sh) unless already present."""
    if version in PYTHONS:
        return
    scripts.run_script("ensure-python.sh", version)
    target = __python_executable(version)
    # The script is expected to have produced the binary; fail loudly if not.
    assert os.path.exists(target), target
    PYTHONS.add(version)
scripts.RBENV_ROOT) if not os.path.exists(RUBY_BUILD): git("clone", "https://github.com/rbenv/ruby-build.git", RUBY_BUILD) if not os.path.exists( os.path.join(scripts.RBENV_ROOT, "versions", scripts.RBENV_VERSION) ): subprocess.check_call([RBENV_COMMAND, "install", scripts.RBENV_VERSION]) subprocess.check_call([GEM_EXECUTABLE, "update", "--system"]) if not ( os.path.exists(BUNDLER_EXECUTABLE) and subprocess.call([BUNDLER_EXECUTABLE, "version"]) == 0 ): subprocess.check_call([GEM_EXECUTABLE, "install", "bundler"]) assert os.path.exists(BUNDLER_EXECUTABLE) hypothesis-hypothesis-python-4.36.2/tooling/src/hypothesistooling/junkdrawer.py000066400000000000000000000026311354103617500303130ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER """Dumping ground module for things that don't have anywhere better to go. 
def once(fn):
    """Wrap *fn* so that only its first invocation actually runs it.

    The guard flag is set only after *fn* returns, so a call that raises
    does not count as "called" and a later call will retry.
    """

    def wrapper():
        if not wrapper.has_been_called:
            fn()
            wrapper.has_been_called = True

    wrapper.has_been_called = False
    wrapper.__name__ = fn.__name__
    return wrapper
MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import os import subprocess import hypothesistooling as tools import hypothesistooling.installers as install import hypothesistooling.releasemanagement as rm from hypothesistooling.junkdrawer import in_dir, unlink_if_present, unquote_string PACKAGE_NAME = "conjecture-rust" CONJECTURE_RUST = os.path.join(tools.ROOT, PACKAGE_NAME) BASE_DIR = CONJECTURE_RUST TAG_PREFIX = PACKAGE_NAME + "-" RELEASE_FILE = os.path.join(BASE_DIR, "RELEASE.md") CHANGELOG_FILE = os.path.join(BASE_DIR, "CHANGELOG.md") CARGO_FILE = os.path.join(BASE_DIR, "Cargo.toml") SRC = os.path.join(BASE_DIR, "lib") def has_release(): """Is there a version of this package ready to release?""" return os.path.exists(RELEASE_FILE) def update_changelog_and_version(): """Update the changelog and version based on the current release file.""" release_type, release_contents = rm.parse_release_file(RELEASE_FILE) version = current_version() version_info = rm.parse_version(version) version, version_info = rm.bump_version_info(version_info, release_type) rm.replace_assignment(CARGO_FILE, "version", repr(version)) rm.update_markdown_changelog( CHANGELOG_FILE, name="Conjecture for Rust", version=version, entry=release_contents, ) os.unlink(RELEASE_FILE) def cargo(*args): install.ensure_rustup() with in_dir(BASE_DIR): subprocess.check_call(("cargo",) + args) IN_TEST = False def build_distribution(): """Build the crate.""" if IN_TEST: cargo("package", "--allow-dirty") else: cargo("package") def tag_name(): 
CARGO_CREDENTIALS = os.path.expanduser("~/.cargo/credentials")


def upload_distribution():
    """Upload the built package to crates.io."""
    tools.assert_can_release()

    # Yes, cargo really will only look in this file. Yes this is terrible.
    # This only runs on Travis, so we may be assumed to own it, but still.
    unlink_if_present(CARGO_CREDENTIALS)

    # symlink so that the actual secret credentials can't be leaked via the
    # cache.
    os.symlink(tools.CARGO_API_KEY, CARGO_CREDENTIALS)

    # Give the key the right permissions (owner read/write only).
    os.chmod(CARGO_CREDENTIALS, int("0600", 8))

    cargo("publish")
# # END HEADER from __future__ import absolute_import, division, print_function import os import re import shutil import subprocess import sys import requests import hypothesistooling as tools import hypothesistooling.releasemanagement as rm PACKAGE_NAME = "hypothesis-python" HYPOTHESIS_PYTHON = os.path.join(tools.ROOT, PACKAGE_NAME) PYTHON_TAG_PREFIX = "hypothesis-python-" BASE_DIR = HYPOTHESIS_PYTHON PYTHON_SRC = os.path.join(HYPOTHESIS_PYTHON, "src") PYTHON_TESTS = os.path.join(HYPOTHESIS_PYTHON, "tests") RELEASE_FILE = os.path.join(HYPOTHESIS_PYTHON, "RELEASE.rst") assert os.path.exists(PYTHON_SRC) __version__ = None __version_info__ = None VERSION_FILE = os.path.join(PYTHON_SRC, "hypothesis/version.py") with open(VERSION_FILE) as o: exec(o.read()) assert __version__ is not None assert __version_info__ is not None def has_release(): return os.path.exists(RELEASE_FILE) def parse_release_file(): return rm.parse_release_file(RELEASE_FILE) def has_source_changes(): return tools.has_changes([PYTHON_SRC]) def build_docs(builder="html"): # See https://www.sphinx-doc.org/en/stable/man/sphinx-build.html # (unfortunately most options only have the short flag version) tools.scripts.pip_tool( "sphinx-build", "-n", "-W", "--keep-going", "-T", "-E", "-b", builder, "docs", "docs/_build/" + builder, cwd=HYPOTHESIS_PYTHON, ) CHANGELOG_ANCHOR = re.compile(r"^\.\. 
def update_changelog_and_version():
    """Consume RELEASE.rst: bump the package version, splice the release
    notes into docs/changes.rst, and stamp pending deprecation dates."""
    global __version_info__
    global __version__

    contents = changelog()
    assert "\r" not in contents
    lines = contents.split("\n")
    # Find the first version anchor (".. _vX.Y.Z:") so we can split the
    # changelog into the preamble and the existing entries.
    # NOTE(review): assumes the changelog always contains at least one
    # anchor; otherwise ``beginning``/``rest`` below would be unbound.
    for i, l in enumerate(lines):
        if CHANGELOG_ANCHOR.match(l):
            assert CHANGELOG_BORDER.match(lines[i + 2]), repr(lines[i + 2])
            assert CHANGELOG_HEADER.match(lines[i + 3]), repr(lines[i + 3])
            assert CHANGELOG_BORDER.match(lines[i + 4]), repr(lines[i + 4])

            beginning = "\n".join(lines[:i])
            rest = "\n".join(lines[i:])
            assert "\n".join((beginning, rest)) == contents
            break

    release_type, release_contents = parse_release_file()

    new_version_string, new_version_info = rm.bump_version_info(
        __version_info__, release_type
    )

    # Keep the in-process version in sync with what we write to disk.
    __version_info__ = new_version_info
    __version__ = new_version_string

    rm.replace_assignment(VERSION_FILE, "__version_info__", repr(new_version_info))

    heading_for_new_version = " - ".join((new_version_string, rm.release_date_string()))
    border_for_new_version = "-" * len(heading_for_new_version)

    # New entry goes between the preamble and the previous entries.
    new_changelog_parts = [
        beginning.strip(),
        "",
        ".. _v%s:" % (new_version_string),
        "",
        border_for_new_version,
        heading_for_new_version,
        border_for_new_version,
        "",
        release_contents,
        "",
        rest,
    ]

    with open(CHANGELOG_FILE, "w") as o:
        o.write("\n".join(new_changelog_parts))

    # Replace the `since="RELEASEDAY"` argument to `note_deprecation`
    # with today's date, to record it for future reference.
    before = 'since="RELEASEDAY"'
    after = before.replace("RELEASEDAY", rm.release_date_string())
    for root, _, files in os.walk(PYTHON_SRC):
        for fname in (os.path.join(root, f) for f in files if f.endswith(".py")):
            with open(fname) as f:
                contents = f.read()
            if before in contents:
                with open(fname, "w") as f:
                    f.write(contents.replace(before, after))
def latest_version():
    """Return the highest released hypothesis-python version string,
    determined from the repository's ``hypothesis-python-X.Y.Z`` tags."""
    candidates = []
    for tag in tools.tags():
        if not tag.startswith(PYTHON_TAG_PREFIX):
            continue
        version_string = tag[len(PYTHON_TAG_PREFIX) :]
        assert version_string == version_string.strip()
        pieces = version_string.split(".")
        assert len(pieces) == 3
        # Compare numerically, not lexicographically: (4, 36, 2) > (4, 9, 0).
        candidates.append((tuple(map(int, pieces)), version_string))
    return max(candidates)[1]
# # END HEADER from __future__ import absolute_import, division, print_function import os import subprocess from glob import glob import hypothesistooling as tools import hypothesistooling.installers as install import hypothesistooling.projects.conjecturerust as cr import hypothesistooling.releasemanagement as rm from hypothesistooling.junkdrawer import in_dir, once, unlink_if_present PACKAGE_NAME = "hypothesis-ruby" HYPOTHESIS_RUBY = os.path.join(tools.ROOT, PACKAGE_NAME) BASE_DIR = HYPOTHESIS_RUBY TAG_PREFIX = PACKAGE_NAME + "-" RELEASE_FILE = os.path.join(BASE_DIR, "RELEASE.md") CHANGELOG_FILE = os.path.join(BASE_DIR, "CHANGELOG.md") GEMSPEC_FILE = os.path.join(BASE_DIR, "hypothesis-specs.gemspec") CARGO_FILE = os.path.join(BASE_DIR, "Cargo.toml") GEMFILE_LOCK_FILE = os.path.join(BASE_DIR, "Gemfile.lock") CONJECTURE_CARGO_FILE = cr.CARGO_FILE RUST_SRC = os.path.join(BASE_DIR, "src") RUBY_SRC = os.path.join(BASE_DIR, "lib") def has_release(): """Is there a version of this package ready to release?""" return os.path.exists(RELEASE_FILE) def parse_release_file(): return rm.parse_release_file(RELEASE_FILE) def update_changelog_and_version(): """Update the changelog and version based on the current release file.""" release_type, release_contents = parse_release_file() version = current_version() version_info = rm.parse_version(version) version, version_info = rm.bump_version_info(version_info, release_type) rm.replace_assignment(GEMSPEC_FILE, "s.version", repr(version)) rm.replace_assignment(GEMSPEC_FILE, "s.date", repr(rm.release_date_string())) rm.update_markdown_changelog( CHANGELOG_FILE, name="Hypothesis for Ruby", version=version, entry=release_contents, ) os.unlink(RELEASE_FILE) LOCAL_PATH_DEPENDENCY = "{ path = '../conjecture-rust' }" def update_conjecture_dependency(dependency): rm.replace_assignment(CARGO_FILE, "conjecture", dependency) def build_distribution(): """Build the rubygem.""" current_dependency = rm.extract_assignment(CARGO_FILE, "conjecture") 
def current_version():
    """Returns the current version as specified by the gemspec."""
    ensure_bundler()
    # The gemspec is Ruby code, so we evaluate it with Ruby (under bundler)
    # and have it print the version, rather than parsing it from Python.
    return (
        subprocess.check_output(
            [install.BUNDLER_EXECUTABLE, "exec", "ruby", "-e", RUBY_TO_PRINT_VERSION]
        )
        .decode("ascii")
        .strip()
    )
def assignment_matcher(name):
    """Return a compiled regex matching a single ``name = value`` line.

    Group 1 is everything up to and including the ``=`` (with surrounding
    whitespace), group 2 is the assigned value.  e.g. for " foo = 1",
    group 1 is " foo = " and group 2 is "1".
    """
    return re.compile(r"\A(\s*%s\s*=\s*)(.+)\Z" % (re.escape(name),))


def extract_assignment_from_string(contents, name):
    """Return the (stripped) value assigned to *name* in *contents*.

    Raises ValueError if no line assigns to *name*.
    """
    matcher = assignment_matcher(name)

    # Idiom fix: the index was unused, so iterate the lines directly.
    for line in contents.split("\n"):
        match = matcher.match(line)
        if match is not None:
            return match[2].strip()

    raise ValueError("Key %s not found in %s" % (name, contents))


def extract_assignment(filename, name):
    """Like extract_assignment_from_string, reading *filename*'s contents."""
    with open(filename) as i:
        return extract_assignment_from_string(i.read(), name)


def replace_assignment_in_string(contents, name, value):
    """Return *contents* with the single assignment to *name* rewritten
    to assign *value*, preserving the original formatting of the line up
    to the assigned value.

    Raises ValueError unless exactly one line assigns to *name*.
    """
    lines = contents.split("\n")

    matcher = assignment_matcher(name)

    count = 0

    for i, line in enumerate(lines):
        match = matcher.match(line)
        if match is not None:
            count += 1
            # Keep the original "name = " prefix, swap in the new value.
            lines[i] = match[1] + value

    if count == 0:
        raise ValueError("Key %s not found in %s" % (name, contents))
    if count > 1:
        raise ValueError("Key %s found %d times in %s" % (name, count, contents))

    return "\n".join(lines)
RELEASE_TYPE = re.compile(r"^RELEASE_TYPE: +(major|minor|patch)")

MAJOR = "major"
MINOR = "minor"
PATCH = "patch"

VALID_RELEASE_TYPES = (MAJOR, MINOR, PATCH)


def parse_release_file(filename):
    """Read *filename* and parse it as a release file."""
    with open(filename) as i:
        return parse_release_file_contents(i.read(), filename)


def parse_release_file_contents(release_contents, filename):
    """Split a release file into ``(release_type, release_notes)``.

    The first line must be "RELEASE_TYPE:" followed by major, minor or
    patch; everything after it (stripped) is the changelog entry.
    """
    stripped_lines = [l.rstrip() for l in release_contents.split("\n")]
    header = RELEASE_TYPE.match(stripped_lines[0])
    if header is None:
        raise ValueError(
            "%s does not start by specifying release type. The first "
            "line of the file should be RELEASE_TYPE: followed by one of "
            "major, minor, or patch, to specify the type of release that "
            "this is (i.e. which version number to increment). Instead the "
            "first line was %r" % (filename, stripped_lines[0])
        )
    release_type = header.group(1)
    # Defensive double-check; the regex already restricts the choices.
    if release_type not in VALID_RELEASE_TYPES:
        raise ValueError("Unrecognised release type %r" % (release_type,))
    return release_type, "\n".join(stripped_lines[1:]).strip()


def bump_version_info(version_info, release_type):
    """Increment *version_info* (a tuple of ints) for *release_type*.

    Every component after the bumped one resets to zero.  Returns the pair
    ``(new_version_string, new_version_tuple)``.
    """
    bumped = list(version_info)
    index = VALID_RELEASE_TYPES.index(release_type)
    bumped[index] += 1
    bumped[index + 1 :] = [0] * (len(bumped) - index - 1)
    bumped = tuple(bumped)
    return ".".join(map(str, bumped)), bumped


def parse_version(version):
    """Parse "X.Y.Z" into the tuple of ints (X, Y, Z)."""
    return tuple(int(piece) for piece in version.split("."))
def print_command(command, args):
    """Echo *command* and its (shell-quoted) arguments, collapsing each run
    of two or more consecutive filesystem paths into a single "..."."""
    display = []
    path_run = []  # consecutive args that are existing paths, not yet shown

    def flush_run():
        # A lone path is printed as-is; longer runs collapse to "...".
        if len(path_run) > 1:
            display.append("...")
        else:
            display.extend(path_run)
        del path_run[:]

    for value in args:
        if os.path.exists(value):
            path_run.append(value)
        else:
            flush_run()
            display.append(value)
    flush_run()

    print(command, *map(shlex.quote, display))
# # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import os import pytest import hypothesistooling as tools import hypothesistooling.__main__ as main import hypothesistooling.releasemanagement as rm @pytest.mark.parametrize( "project", [p for p in tools.all_projects() if p.has_release()] ) def test_release_file_exists_and_is_valid(project, monkeypatch): assert not tools.has_uncommitted_changes(project.BASE_DIR) monkeypatch.setattr(tools, "create_tag", lambda *args, **kwargs: None) monkeypatch.setattr(tools, "push_tag", lambda name: None) monkeypatch.setattr(rm, "commit_pending_release", lambda p: None) monkeypatch.setattr(project, "upload_distribution", lambda: None) monkeypatch.setattr(project, "IN_TEST", True, raising=False) try: main.do_release(project) with open(project.CHANGELOG_FILE) as i: changelog = i.read() assert project.current_version() in changelog assert rm.release_date_string() in changelog finally: tools.git("checkout", project.BASE_DIR) os.chdir(tools.ROOT) hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_documentation.py000066400000000000000000000014431354103617500272330ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function from hypothesistooling.__main__ import documentation def test_documentation(): documentation() hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_pyup_yml.py000066400000000000000000000020111354103617500262300ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import yaml from pyup.config import Config import hypothesistooling as tools def test_pyup_yml_is_valid(): with open(tools.PYUP_FILE, "r") as i: data = yaml.safe_load(i.read()) config = Config() config.update_config(data) assert config.is_valid_schedule(), "Schedule %r is invalid" % (config.schedule,) hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_release_files.py000066400000000000000000000022211354103617500271570ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. 
If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import pytest import hypothesistooling as tools import hypothesistooling.releasemanagement as rm @pytest.mark.parametrize("project", tools.all_projects()) def test_release_file_exists_and_is_valid(project): if project.has_source_changes(): assert project.has_release(), ( "There are source changes but no RELEASE.rst. Please create " "one to describe your changes." ) rm.parse_release_file(project.RELEASE_FILE) hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_release_management.py000066400000000000000000000055021354103617500301760ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import pytest from hypothesistooling.releasemanagement import ( bump_version_info, parse_release_file_contents, release_date_string, replace_assignment_in_string as replace, update_markdown_changelog, ) def parse_release(contents): return parse_release_file_contents(contents, "") def test_update_single_line(): assert replace("a = 1", "a", "2") == "a = 2" def test_update_without_spaces(): assert replace("a=1", "a", "2") == "a=2" def test_update_in_middle(): assert replace("a = 1\nb=2\nc = 3", "b", "4") == "a = 1\nb=4\nc = 3" def test_quotes_string_to_assign(): assert replace("a.c = 1", "a.c", "2") == "a.c = 2" with pytest.raises(ValueError): replace("abc = 1", "a.c", "2") def test_duplicates_are_errors(): with pytest.raises(ValueError): replace("a = 1\na=1", "a", "2") def test_missing_is_error(): with pytest.raises(ValueError): replace("", "a", "1") def test_bump_minor_version(): assert bump_version_info((1, 1, 1), "minor")[0] == "1.2.0" def test_parse_release_file(): assert parse_release("RELEASE_TYPE: patch\nhi") == ("patch", "hi") assert parse_release("RELEASE_TYPE: minor\n\n\n\nhi") == ("minor", "hi") assert parse_release("RELEASE_TYPE: major\n \n\nhi") == ("major", "hi") def test_invalid_release(): with pytest.raises(ValueError): parse_release("RELEASE_TYPE: wrong\nstuff") with pytest.raises(ValueError): parse_release("") TEST_CHANGELOG = """ # A test project 1.2.3 (%s) some stuff happened # some previous log entry """ % ( release_date_string(), ) def test_update_changelog(tmpdir): path = tmpdir.join("CHANGELOG.md") path.write("# some previous log entry\n") update_markdown_changelog( str(path), "A test project", "1.2.3", "some stuff happened" ) assert path.read().strip() == TEST_CHANGELOG.strip() def test_changelog_parsing_strips_trailing_whitespace(): header = "RELEASE_TYPE: patch\n\n" contents = "Adds a feature\n indented.\n" level, out = parse_release(header + 
contents.replace("feature", "feature ")) assert contents.strip() == out hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_requirements.py000066400000000000000000000014541354103617500271070ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesistooling.__main__ import check_requirements def test_requirements(): check_requirements() hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_rst_is_valid.py000066400000000000000000000024021354103617500270400ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os import hypothesistooling as tools import hypothesistooling.projects.hypothesispython as hp from hypothesistooling.scripts import pip_tool def is_sphinx(f): f = os.path.abspath(f) return f.startswith(os.path.join(hp.HYPOTHESIS_PYTHON, "docs")) ALL_RST = [ f for f in tools.all_files() if os.path.basename(f) != "RELEASE.rst" and f.endswith(".rst") ] def test_passes_rst_lint(): pip_tool("rst-lint", *[f for f in ALL_RST if not is_sphinx(f)]) def test_passes_flake8(): pip_tool("flake8", "--select=W191,W291,W292,W293,W391", *ALL_RST) hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_secrets.py000066400000000000000000000017771354103617500260440ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os import pytest import hypothesistooling as tools @pytest.mark.skipif( os.environ.get("TRAVIS_SECURE_ENV_VARS", None) != "true", reason="Not running in an environment with travis secure vars", ) def test_can_descrypt_secrets(): tools.decrypt_secrets() assert os.path.exists(tools.DEPLOY_KEY) hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_security.py000066400000000000000000000020011354103617500262200ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function from hypothesistooling.projects.hypothesispython import PYTHON_SRC from hypothesistooling.scripts import pip_tool def test_bandit_passes_on_hypothesis(): # pypi.org/project/bandit has the table of error codes, or `bandit --help` pip_tool("bandit", "--skip", "B101,B102,B110,B303,B311", "--recursive", PYTHON_SRC) hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_shellcheck.py000066400000000000000000000017761354103617500265000ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. 
See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import subprocess import hypothesistooling as tools import hypothesistooling.installers as install SCRIPTS = [f for f in tools.all_files() if f.endswith(".sh")] def test_all_shell_scripts_are_valid(): subprocess.check_call( [install.SHELLCHECK, "--exclude=SC1073,SC1072", *SCRIPTS], cwd=tools.ROOT ) hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_type_hints.py000066400000000000000000000063701354103617500265540ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. 
# # END HEADER from __future__ import absolute_import, division, print_function import os import subprocess import pytest from hypothesistooling.projects.hypothesispython import PYTHON_SRC from hypothesistooling.scripts import pip_tool, tool_path def test_mypy_passes_on_hypothesis(): pip_tool("mypy", PYTHON_SRC) def get_mypy_analysed_type(fname, val): out = subprocess.Popen( [tool_path("mypy"), fname], stdout=subprocess.PIPE, encoding="utf-8", universal_newlines=True, # We set the MYPYPATH explicitly, because PEP561 discovery wasn't # working in CI as of mypy==0.600 - hopefully a temporary workaround. env=dict(os.environ, MYPYPATH=PYTHON_SRC), ).stdout.read() assert len(out.splitlines()) == 1 # See https://mypy.readthedocs.io/en/latest/common_issues.html#reveal-type # The shell output for `reveal_type([1, 2, 3])` looks like a literal: # file.py:2: error: Revealed type is 'builtins.list[builtins.int*]' return ( out.split("Revealed type is ")[1] .strip() .strip("'") .replace("builtins.", "") .replace("*", "") ) @pytest.mark.parametrize( "val,expect", [ ("integers()", "int"), ("text()", "str"), ("integers().map(str)", "str"), ("booleans().filter(bool)", "bool"), ("lists(none())", "list[None]"), ("dictionaries(integers(), datetimes())", "dict[int, datetime.datetime]"), ("data()", "hypothesis._strategies.DataObject"), # Ex`-1 stands for recursion in the whole type, i.e. Ex`0 == Union[...] ("recursive(integers(), lists)", "Union[list[Ex`-1], int]"), # See https://github.com/python/mypy/issues/5269 - fix the hints on # `one_of` and document the minimum Mypy version when the issue is fixed. 
("one_of(integers(), text())", "Any"), ], ) def test_revealed_types(tmpdir, val, expect): """Check that Mypy picks up the expected `X` in SearchStrategy[`X`].""" f = tmpdir.join(expect + ".py") f.write( "from hypothesis.strategies import *\n" "s = {}\n" "reveal_type(s)\n".format(val) ) typ = get_mypy_analysed_type(str(f.realpath()), val) assert typ == "hypothesis.searchstrategy.strategies.SearchStrategy[%s]" % expect def test_data_object_type_tracing(tmpdir): f = tmpdir.join("chech_mypy_on_st_data.py") f.write( "from hypothesis.strategies import data, integers\n" "d = data().example()\n" "s = d.draw(integers())\n" "reveal_type(s)\n" ) got = get_mypy_analysed_type(str(f.realpath()), "data().draw(integers())") assert got == "int" hypothesis-hypothesis-python-4.36.2/whole-repo-tests/test_version_sync.py000066400000000000000000000021431354103617500271010ustar00rootroot00000000000000# coding=utf-8 # # This file is part of Hypothesis, which may be found at # https://github.com/HypothesisWorks/hypothesis/ # # Most of this work is copyright (C) 2013-2019 David R. MacIver # (david@drmaciver.com), but it contains contributions by others. See # CONTRIBUTING.rst for a full list of people who may hold copyright, and # consult the git log if you need to determine who owns an individual # contribution. # # This Source Code Form is subject to the terms of the Mozilla Public License, # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at https://mozilla.org/MPL/2.0/. # # END HEADER from __future__ import absolute_import, division, print_function import toml from hypothesistooling.projects.hypothesisruby import CARGO_FILE, GEMFILE_LOCK_FILE def test_helix_version_sync(): cargo = toml.load(CARGO_FILE) helix_version = cargo["dependencies"]["helix"] gem_lock = open(GEMFILE_LOCK_FILE).read() assert ( "helix_runtime (%s)" % (helix_version,) in gem_lock ), "helix version must be the same in %s and %s" % (CARGO_FILE, GEMFILE_LOCK_FILE)