pytest-2.5.1/0000775000175000017500000000000012254002202012424 5ustar hpkhpk00000000000000pytest-2.5.1/LICENSE0000664000175000017500000000204512254002202013432 0ustar hpkhpk00000000000000 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
pytest-2.5.1/setup.py0000664000175000017500000000556612254002202014152 0ustar hpkhpk00000000000000import os, sys from setuptools import setup, Command classifiers=['Development Status :: 6 - Mature', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: POSIX', 'Operating System :: Microsoft :: Windows', 'Operating System :: MacOS :: MacOS X', 'Topic :: Software Development :: Testing', 'Topic :: Software Development :: Libraries', 'Topic :: Utilities', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3'] + [ ("Programming Language :: Python :: %s" % x) for x in "2.6 2.7 3.0 3.1 3.2 3.3".split()] long_description = open("README.rst").read() def main(): install_requires = ["py>=1.4.19"] if sys.version_info < (2,7): install_requires.append("argparse") if sys.platform == "win32": install_requires.append("colorama") setup( name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, version='2.5.1', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], author='Holger Krekel, Benjamin Peterson, Ronny Pfannschmidt, Floris Bruynooghe and others', author_email='holger at merlinux.eu', entry_points= make_entry_points(), classifiers=classifiers, cmdclass = {'test': PyTest}, # the following should be enabled for release install_requires=install_requires, packages=['_pytest', '_pytest.assertion'], py_modules=['pytest'], zip_safe=False, ) def cmdline_entrypoints(versioninfo, platform, basename): target = 'pytest:main' if platform.startswith('java'): points = {'py.test-jython': target} else: if basename.startswith("pypy"): points = {'py.test-%s' % basename: target} else: # cpython points = {'py.test-%s.%s' % versioninfo[:2] : target,} points['py.test'] = target return points def make_entry_points(): basename = os.path.basename(sys.executable) points = cmdline_entrypoints(sys.version_info, sys.platform, basename) 
keys = list(points.keys()) keys.sort() l = ["%s = %s" % (x, points[x]) for x in keys] return {'console_scripts': l} class PyTest(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): import sys,subprocess PPATH=[x for x in os.environ.get("PYTHONPATH", "").split(":") if x] PPATH.insert(0, os.getcwd()) os.environ["PYTHONPATH"] = ":".join(PPATH) errno = subprocess.call([sys.executable, 'pytest.py']) raise SystemExit(errno) if __name__ == '__main__': main() pytest-2.5.1/doc/0000775000175000017500000000000012254002202013171 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/0000775000175000017500000000000012254002202013573 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/plugins.txt0000664000175000017500000003145212254002202016022 0ustar hpkhpk00000000000000.. _plugins: Working with plugins and conftest files ============================================= py.test implements all aspects of configuration, collection, running and reporting by calling `well specified hooks`_. Virtually any Python module can be registered as a plugin. It can implement any number of hook functions (usually two or three) which all have a ``pytest_`` prefix, making hook functions easy to distinguish and find. There are three basic location types: * `builtin plugins`_: loaded from py.test's internal ``_pytest`` directory. * `external plugins`_: modules discovered through `setuptools entry points`_ * `conftest.py plugins`_: modules auto-discovered in test directories .. _`pytest/plugin`: http://bitbucket.org/hpk42/pytest/src/tip/pytest/plugin/ .. _`conftest.py plugins`: .. _`conftest.py`: .. _`localplugin`: .. _`conftest`: conftest.py: local per-directory plugins -------------------------------------------------------------- local ``conftest.py`` plugins contain directory-specific hook implementations. Session and test running activities will invoke all hooks defined in ``conftest.py`` files closer to the root of the filesystem. 
Example: Assume the following layout and content of files:: a/conftest.py: def pytest_runtest_setup(item): # called for running each test in 'a' directory print ("setting up", item) a/test_in_subdir.py: def test_sub(): pass test_flat.py: def test_flat(): pass Here is how you might run it:: py.test test_flat.py # will not show "setting up" py.test a/test_sub.py # will show "setting up" .. Note:: If you have ``conftest.py`` files which do not reside in a python package directory (i.e. one containing an ``__init__.py``) then "import conftest" can be ambiguous because there might be other ``conftest.py`` files as well on your PYTHONPATH or ``sys.path``. It is thus good practise for projects to either put ``conftest.py`` under a package scope or to never import anything from a conftest.py file. .. _`external plugins`: .. _`extplugins`: Installing External Plugins / Searching ------------------------------------------------------ Installing a plugin happens through any usual Python installation tool, for example:: pip install pytest-NAME pip uninstall pytest-NAME If a plugin is installed, py.test automatically finds and integrates it, there is no need to activate it. Here is a initial list of known plugins: .. _`django`: https://www.djangoproject.com/ * `pytest-django `_: write tests for `django`_ apps, using pytest integration. * `pytest-twisted `_: write tests for `twisted `_ apps, starting a reactor and processing deferreds from test functions. * `pytest-capturelog `_: to capture and assert about messages from the logging module * `pytest-cov `_: coverage reporting, compatible with distributed testing * `pytest-xdist `_: to distribute tests to CPUs and remote hosts, to run in boxed mode which allows to survive segmentation faults, to run in looponfailing mode, automatically re-running failing tests on file changes, see also :ref:`xdist` * `pytest-instafail `_: to report failures while the test run is happening. 
* `pytest-bdd `_ and `pytest-konira `_ to write tests using behaviour-driven testing. * `pytest-timeout `_: to timeout tests based on function marks or global definitions. * `pytest-cache `_: to interactively re-run failing tests and help other plugins to store test run information across invocations. * `pytest-pep8 `_: a ``--pep8`` option to enable PEP8 compliance checking. * `oejskit `_: a plugin to run javascript unittests in life browsers You may discover more plugins through a `pytest- pypi.python.org search`_. .. _`available installable plugins`: .. _`pytest- pypi.python.org search`: http://pypi.python.org/pypi?%3Aaction=search&term=pytest-&submit=search Writing a plugin by looking at examples ------------------------------------------------------ .. _`Distribute`: http://pypi.python.org/pypi/distribute .. _`setuptools`: http://pypi.python.org/pypi/setuptools If you want to write a plugin, there are many real-life examples you can copy from: * a custom collection example plugin: :ref:`yaml plugin` * around 20 `builtin plugins`_ which provide py.test's own functionality * many `external plugins`_ providing additional features All of these plugins implement the documented `well specified hooks`_ to extend and add functionality. .. _`setuptools entry points`: Making your plugin installable by others ----------------------------------------------- If you want to make your plugin externally available, you may define a so-called entry point for your distribution so that ``py.test`` finds your plugin module. Entry points are a feature that is provided by `setuptools`_ or `Distribute`_. py.test looks up the ``pytest11`` entrypoint to discover its plugins and you can thus make your plugin available by definig it in your setuptools/distribute-based setup-invocation: .. 
sourcecode:: python # sample ./setup.py file from setuptools import setup setup( name="myproject", packages = ['myproject'] # the following makes a plugin available to py.test entry_points = { 'pytest11': [ 'name_of_plugin = myproject.pluginmodule', ] }, ) If a package is installed this way, py.test will load ``myproject.pluginmodule`` as a plugin which can define `well specified hooks`_. .. _`pluginorder`: Plugin discovery order at tool startup -------------------------------------------- py.test loads plugin modules at tool startup in the following way: * by loading all builtin plugins * by loading all plugins registered through `setuptools entry points`_. * by pre-scanning the command line for the ``-p name`` option and loading the specified plugin before actual command line parsing. * by loading all :file:`conftest.py` files as inferred by the command line invocation (test files and all of its *parent* directories). Note that ``conftest.py`` files from *sub* directories are by default not loaded at tool startup. * by recursively loading all plugins specified by the ``pytest_plugins`` variable in ``conftest.py`` files Requiring/Loading plugins in a test module or conftest file ------------------------------------------------------------- You can require plugins in a test module or a conftest file like this:: pytest_plugins = "name1", "name2", When the test module or conftest plugin is loaded the specified plugins will be loaded as well. You can also use dotted path like this:: pytest_plugins = "myapp.testsupport.myplugin" which will import the specified module as a py.test plugin. Accessing another plugin by name -------------------------------------------- If a plugin wants to collaborate with code from another plugin it can obtain a reference through the plugin manager like this: .. sourcecode:: python plugin = config.pluginmanager.getplugin("name_of_plugin") If you want to look at the names of existing plugins, use the ``--traceconfig`` option. .. 
_`findpluginname`: Finding out which plugins are active ---------------------------------------------------------------------------- If you want to find out which plugins are active in your environment you can type:: py.test --traceconfig and will get an extended test header which shows activated plugins and their names. It will also print local plugins aka :ref:`conftest.py ` files when they are loaded. .. _`cmdunregister`: Deactivating / unregistering a plugin by name ---------------------------------------------------------------------------- You can prevent plugins from loading or unregister them:: py.test -p no:NAME This means that any subsequent try to activate/load the named plugin will it already existing. See :ref:`findpluginname` for how to obtain the name of a plugin. .. _`builtin plugins`: py.test default plugin reference ==================================== You can find the source code for the following plugins in the `pytest repository `_. .. autosummary:: _pytest.assertion _pytest.capture _pytest.config _pytest.doctest _pytest.genscript _pytest.helpconfig _pytest.junitxml _pytest.mark _pytest.monkeypatch _pytest.nose _pytest.pastebin _pytest.pdb _pytest.pytester _pytest.python _pytest.recwarn _pytest.resultlog _pytest.runner _pytest.main _pytest.skipping _pytest.terminal _pytest.tmpdir _pytest.unittest .. _`well specified hooks`: py.test hook reference ==================================== Hook specification and validation ----------------------------------------- py.test calls hook functions to implement initialization, running, test execution and reporting. When py.test loads a plugin it validates that each hook function conforms to its respective hook specification. Each hook function name and its argument names need to match a hook specification. However, a hook function may accept *fewer* parameters by simply not specifying them. If you mistype argument names or the hook name itself you get an error showing the available arguments. 
Initialization, command line and configuration hooks -------------------------------------------------------------------- .. currentmodule:: _pytest.hookspec .. autofunction:: pytest_cmdline_preparse .. autofunction:: pytest_cmdline_parse .. autofunction:: pytest_namespace .. autofunction:: pytest_addoption .. autofunction:: pytest_cmdline_main .. autofunction:: pytest_configure .. autofunction:: pytest_unconfigure Generic "runtest" hooks ------------------------------ All all runtest related hooks receive a :py:class:`pytest.Item` object. .. autofunction:: pytest_runtest_protocol .. autofunction:: pytest_runtest_setup .. autofunction:: pytest_runtest_call .. autofunction:: pytest_runtest_teardown .. autofunction:: pytest_runtest_makereport For deeper understanding you may look at the default implementation of these hooks in :py:mod:`_pytest.runner` and maybe also in :py:mod:`_pytest.pdb` which interacts with :py:mod:`_pytest.capture` and its input/output capturing in order to immediately drop into interactive debugging when a test failure occurs. The :py:mod:`_pytest.terminal` reported specifically uses the reporting hook to print information about a test run. Collection hooks ------------------------------ py.test calls the following hooks for collecting files and directories: .. autofunction:: pytest_ignore_collect .. autofunction:: pytest_collect_directory .. autofunction:: pytest_collect_file For influencing the collection of objects in Python modules you can use the following hook: .. autofunction:: pytest_pycollect_makeitem .. autofunction:: pytest_generate_tests After collection is complete, you can modify the order of items, delete or otherwise amend the test items: .. autofunction:: pytest_collection_modifyitems Reporting hooks ------------------------------ Session related reporting hooks: .. autofunction:: pytest_collectstart .. autofunction:: pytest_itemcollected .. autofunction:: pytest_collectreport .. 
autofunction:: pytest_deselected And here is the central hook for reporting about test execution: .. autofunction:: pytest_runtest_logreport Debugging/Interaction hooks -------------------------------------- There are few hooks which can be used for special reporting or interaction with exceptions: .. autofunction:: pytest_internalerror .. autofunction:: pytest_keyboard_interrupt .. autofunction:: pytest_exception_interact Reference of objects involved in hooks =========================================================== .. autoclass:: _pytest.config.Config() :members: .. autoclass:: _pytest.config.Parser() :members: .. autoclass:: _pytest.main.Node() :members: .. autoclass:: _pytest.main.Collector() :members: :show-inheritance: .. autoclass:: _pytest.main.Item() :members: :show-inheritance: .. autoclass:: _pytest.python.Module() :members: :show-inheritance: .. autoclass:: _pytest.python.Class() :members: :show-inheritance: .. autoclass:: _pytest.python.Function() :members: :show-inheritance: .. autoclass:: _pytest.runner.CallInfo() :members: .. autoclass:: _pytest.runner.TestReport() :members: pytest-2.5.1/doc/en/recwarn.txt0000664000175000017500000000223312254002202015775 0ustar hpkhpk00000000000000 Asserting deprecation and other warnings ===================================================== .. _function_argument: The recwarn function argument ------------------------------------ You can use the ``recwarn`` funcarg to assert that code triggers warnings through the Python warnings system. Here is a simple self-contained test:: # content of test_recwarn.py def test_hello(recwarn): from warnings import warn warn("hello", DeprecationWarning) w = recwarn.pop(DeprecationWarning) assert issubclass(w.category, DeprecationWarning) assert 'hello' in str(w.message) assert w.filename assert w.lineno The ``recwarn`` function argument provides these methods: * ``pop(category=None)``: return last warning matching the category. * ``clear()``: clear list of warnings .. 
_ensuring_function_triggers: Ensuring a function triggers a deprecation warning ------------------------------------------------------- You can also call a global helper for checking that a certain function call triggers a Deprecation warning:: import pytest def test_global(): pytest.deprecated_call(myfunction, 17) pytest-2.5.1/doc/en/naming20.txt0000664000175000017500000000131612254002202015750 0ustar hpkhpk00000000000000 .. _naming20: New pytest names in 2.0 (flat is better than nested) ---------------------------------------------------- If you used older version of the ``py`` distribution (which included the py.test command line tool and Python name space) you accessed helpers and possibly collection classes through the ``py.test`` Python namespaces. The new ``pytest`` Python module flaty provides the same objects, following these renaming rules:: py.test.XYZ -> pytest.XYZ py.test.collect.XYZ -> pytest.XYZ py.test.cmdline.main -> pytest.main The old ``py.test.*`` ways to access functionality remain valid but you are encouraged to do global renaming according to the above rules in your test code. pytest-2.5.1/doc/en/index.txt0000664000175000017500000000501412254002202015443 0ustar hpkhpk00000000000000 .. _features: .. second training: `professional testing with Python `_ , 25-27th November 2013, Leipzig. 
pytest: helps you write better programs ============================================= **a mature full-featured Python testing tool** - runs on Posix/Windows, Python 2.5-3.3, PyPy and Jython-2.5.1 - **zero-reported-bugs** policy with >1000 tests against itself - **strict backward compatibility policy** for safe pytest upgrades - :ref:`comprehensive online ` and `PDF documentation `_ - many :ref:`third party plugins ` and :ref:`builtin helpers `, - used in :ref:`many small and large projects and organisations ` - comes with many :ref:`tested examples ` **provides easy no-boilerplate testing** - makes it :ref:`easy to get started `, has many :ref:`usage options ` - :ref:`assert with the assert statement` - helpful :ref:`traceback and failing assertion reporting ` - :ref:`print debugging ` and :ref:`the capturing of standard output during test execution ` **scales from simple unit to complex functional testing** - :ref:`modular parametrizeable fixtures ` (new in 2.3, continously improved) - :ref:`parametrized test functions ` - :ref:`mark` - :ref:`skipping` (improved in 2.4) - :ref:`distribute tests to multiple CPUs ` through :ref:`xdist plugin ` - :ref:`continuously re-run failing tests ` - flexible :ref:`Python test discovery` **integrates with other testing methods and tools**: - multi-paradigm: pytest can run ``nose``, ``unittest`` and ``doctest`` style test suites, including running testcases made for Django and trial - supports :ref:`good integration practises ` - supports extended :ref:`xUnit style setup ` - supports domain-specific :ref:`non-python tests` - supports generating `test coverage reports `_ - supports :pep:`8` compliant coding styles in tests **extensive plugin and customization system**: - all collection, reporting, running aspects are delegated to hook functions - customizations can be per-directory, per-project or per PyPI released plugin - it is easy to add command line options or customize existing behaviour .. 
_`easy`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html pytest-2.5.1/doc/en/usage.txt0000664000175000017500000001265012254002202015444 0ustar hpkhpk00000000000000 .. _usage: Usage and Invocations ========================================== .. _cmdline: Calling pytest through ``python -m pytest`` ----------------------------------------------------- .. versionadded:: 2.0 If you use Python-2.5 or later you can invoke testing through the Python interpreter from the command line:: python -m pytest [...] This is equivalent to invoking the command line script ``py.test [...]`` directly. Getting help on version, option names, environment variables -------------------------------------------------------------- :: py.test --version # shows where pytest was imported from py.test --fixtures # show available builtin function arguments py.test -h | --help # show help on command line and config file options Stopping after the first (or N) failures --------------------------------------------------- To stop the testing process after the first (N) failures:: py.test -x # stop after first failure py.test --maxfail=2 # stop after two failures Specifying tests / selecting tests --------------------------------------------------- Several test run options:: py.test test_mod.py # run tests in module py.test somepath # run all tests below somepath py.test -k stringexpr # only run tests with names that match the # the "string expression", e.g. 
"MyClass and not method" # will select TestMyClass.test_something # but not TestMyClass.test_method_simple Import 'pkg' and use its filesystem location to find and run tests:: py.test --pyargs pkg # run all tests found below directory of pypkg Modifying Python traceback printing ---------------------------------------------- Examples for modifying traceback printing:: py.test --showlocals # show local variables in tracebacks py.test -l # show local variables (shortcut) py.test --tb=long # the default informative traceback formatting py.test --tb=native # the Python standard library formatting py.test --tb=short # a shorter traceback format py.test --tb=line # only one line per failure Dropping to PDB (Python Debugger) on failures ---------------------------------------------- .. _PDB: http://docs.python.org/library/pdb.html Python comes with a builtin Python debugger called PDB_. ``py.test`` allows one to drop into the PDB prompt via a command line option:: py.test --pdb This will invoke the Python debugger on every failure. Often you might only want to do this for the first failing test to understand a certain failure situation:: py.test -x --pdb # drop to PDB on first failure, then end test session py.test --pdb --maxfail=3 # drop to PDB for the first three failures Setting a breakpoint / aka ``set_trace()`` ---------------------------------------------------- If you want to set a breakpoint and enter the ``pdb.set_trace()`` you can use a helper:: import pytest def test_function(): ... pytest.set_trace() # invoke PDB debugger and tracing .. versionadded: 2.0.0 In previous versions you could only enter PDB tracing if you disabled capturing on the command line via ``py.test -s``. .. _durations: Profiling test execution duration ------------------------------------- .. 
versionadded: 2.2 To get a list of the slowest 10 test durations:: py.test --durations=10 Creating JUnitXML format files ---------------------------------------------------- To create result files which can be read by Hudson_ or other Continuous integration servers, use this invocation:: py.test --junitxml=path to create an XML file at ``path``. Creating resultlog format files ---------------------------------------------------- To create plain-text machine-readable result files you can issue:: py.test --resultlog=path and look at the content at the ``path`` location. Such files are used e.g. by the `PyPy-test`_ web page to show test results over several revisions. .. _`PyPy-test`: http://buildbot.pypy.org/summary Sending test report to online pastebin service ----------------------------------------------------- **Creating a URL for each test failure**:: py.test --pastebin=failed This will submit test run information to a remote Paste service and provide a URL for each failure. You may select tests as usual or add for example ``-x`` if you only want to send one particular failure. **Creating a URL for a whole test session log**:: py.test --pastebin=all Currently only pasting to the http://bpaste.net service is implemented. .. _`pytest.main-usage`: Calling pytest from Python code ---------------------------------------------------- .. versionadded:: 2.0 You can invoke ``py.test`` from Python code directly:: pytest.main() this acts as if you would call "py.test" from the command line. It will not raise ``SystemExit`` but return the exitcode instead. 
You can pass in options and arguments:: pytest.main(['-x', 'mytestdir']) or pass in a string:: pytest.main("-x mytestdir") You can specify additional plugins to ``pytest.main``:: # content of myinvoke.py import pytest class MyPlugin: def pytest_sessionfinish(self): print("*** test run reporting finishing") pytest.main("-qq", plugins=[MyPlugin()]) Running it will show that ``MyPlugin`` was added and its hook was invoked:: $ python myinvoke.py *** test run reporting finishing .. include:: links.inc pytest-2.5.1/doc/en/goodpractises.txt0000664000175000017500000002222012254002202017200 0ustar hpkhpk00000000000000 .. highlightlang:: python .. _`goodpractises`: Good Integration Practises ================================================= Work with virtual environments ----------------------------------------------------------- We recommend to use virtualenv_ environments and use pip_ (or easy_install_) for installing your application and any dependencies as well as the ``pytest`` package itself. This way you will get an isolated and reproducible environment. Given you have installed virtualenv_ and execute it from the command line, here is an example session for unix or windows:: virtualenv . # create a virtualenv directory in the current directory source bin/activate # on unix scripts/activate # on Windows We can now install pytest:: pip install pytest Due to the ``activate`` step above the ``pip`` will come from the virtualenv directory and install any package into the isolated virtual environment. Choosing a test layout / import rules ------------------------------------------ py.test supports two common test layouts: * putting tests into an extra directory outside your actual application code, useful if you have many functional tests or for other reasons want to keep tests separate from actual application code (often a good idea):: setup.py # your distutils/setuptools Python package metadata mypkg/ __init__.py appmodule.py tests/ test_app.py ... 
* inlining test directories into your application package, useful if you have direct relation between (unit-)test and application modules and want to distribute your tests along with your application:: setup.py # your distutils/setuptools Python package metadata mypkg/ __init__.py appmodule.py ... test/ test_app.py ... Important notes relating to both schemes: - **make sure that "mypkg" is importable**, for example by typing once:: pip install -e . # install package using setup.py in editable mode - **avoid "__init__.py" files in your test directories**. This way your tests can run easily against an installed version of ``mypkg``, independently from if the installed package contains the tests or not. - With inlined tests you might put ``__init__.py`` into test directories and make them installable as part of your application. Using the ``py.test --pyargs mypkg`` invocation pytest will discover where mypkg is installed and collect tests from there. With the "external" test you can still distribute tests but they will not be installed or become importable. Typically you can run tests by pointing to test directories or modules:: py.test tests/test_app.py # for external test dirs py.test mypkg/test/test_app.py # for inlined test dirs py.test mypkg # run tests in all below test directories py.test # run all tests below current dir ... Because of the above ``editable install`` mode you can change your source code (both tests and the app) and rerun tests at will. Once you are done with your work, you can `use tox`_ to make sure that the package is really correct and tests pass in all required configurations. .. note:: You can use Python3 namespace packages (PEP420) for your application but pytest will still perform `test package name`_ discovery based on the presence of ``__init__.py`` files. If you use one of the two recommended file system layouts above but leave away the ``__init__.py`` files from your directories it should just work on Python3.3 and above. 
From "inlined tests", however, you will need to use absolute imports for getting at your application code. .. _`test package name`: .. note:: If py.test finds a "a/b/test_module.py" test file while recursing into the filesystem it determines the import name as follows: * determine ``basedir``: this is the first "upward" (towards the root) directory not containing an ``__init__.py``. If e.g. both ``a`` and ``b`` contain an ``__init__.py`` file then the parent directory of ``a`` will become the ``basedir``. * perform ``sys.path.insert(0, basedir)`` to make the test module importable under the fully qualified import name. * ``import a.b.test_module`` where the path is determined by converting path separators ``/`` into "." characters. This means you must follow the convention of having directory and file names map directly to the import names. The reason for this somewhat evolved importing technique is that in larger projects multiple test modules might import from each other and thus deriving a canonical import name helps to avoid surprises such as a test modules getting imported twice. .. _`virtualenv`: http://pypi.python.org/pypi/virtualenv .. _`buildout`: http://www.buildout.org/ .. _pip: http://pypi.python.org/pypi/pip .. _`use tox`: Use tox and Continuous Integration servers ------------------------------------------------- If you frequently release code and want to make sure that your actual package passes all tests you may want to look into `tox`_, the virtualenv test automation tool and its `pytest support `_. Tox helps you to setup virtualenv environments with pre-defined dependencies and then executing a pre-configured test command with options. It will run tests against the installed package and not against your source code checkout, helping to detect packaging glitches. If you want to use Jenkins_ you can use the ``--junitxml=PATH`` option to create a JUnitXML file that Jenkins_ can pick up and generate reports. .. _standalone: .. 
_`genscript method`: Create a py.test standalone script ------------------------------------------- If you are a maintainer or application developer and want people who don't deal with python much to easily run tests you may generate a standalone "py.test" script:: py.test --genscript=runtests.py This generates a ``runtests.py`` script which is a fully functional basic ``py.test`` script, running unchanged under Python2 and Python3. You can tell people to download the script and then e.g. run it like this:: python runtests.py Integrating with distutils / ``python setup.py test`` -------------------------------------------------------- You can integrate test runs into your distutils or setuptools based project. Use the `genscript method`_ to generate a standalone py.test script:: py.test --genscript=runtests.py and make this script part of your distribution and then add this to your ``setup.py`` file:: from distutils.core import setup, Command # you can also import from setuptools class PyTest(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): import sys,subprocess errno = subprocess.call([sys.executable, 'runtests.py']) raise SystemExit(errno) setup( #..., cmdclass = {'test': PyTest}, #..., ) If you now type:: python setup.py test this will execute your tests using ``runtests.py``. As this is a standalone version of ``py.test`` no prior installation whatsoever is required for calling the test command. You can also pass additional arguments to the subprocess-calls such as your test directory or other options. Integration with setuptools test commands ---------------------------------------------------- Setuptools supports writing our own Test command for invoking pytest. 
Most often it is better to use tox_ instead, but here is how you can get started with setuptools integration:: from setuptools.command.test import test as TestCommand import sys class PyTest(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): #import here, cause outside the eggs aren't loaded import pytest errno = pytest.main(self.test_args) sys.exit(errno) setup( #..., tests_require=['pytest'], cmdclass = {'test': PyTest}, ) Now if you run:: python setup.py test this will download py.test if needed and then run py.test as you would expect it to. .. _`test discovery`: .. _`Python test discovery`: Conventions for Python test discovery ------------------------------------------------- ``py.test`` implements the following standard test discovery: * collection starts from the initial command line arguments which may be directories, filenames or test ids. * recurse into directories, unless they match :confval:`norecursedirs` * ``test_*.py`` or ``*_test.py`` files, imported by their `package name`_. * ``Test`` prefixed test classes (without an ``__init__`` method) * ``test_`` prefixed test functions or methods are test items For examples of how to customize your test discovery :doc:`example/pythoncollection`. Within Python modules, py.test also discovers tests using the standard :ref:`unittest.TestCase ` subclassing technique. .. 
include:: links.inc pytest-2.5.1/doc/en/test/0000775000175000017500000000000012254002202014552 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/test/attic.txt0000664000175000017500000001011512254002202016415 0ustar hpkhpk00000000000000=============================================== ATTIC documentation =============================================== XXX REVIEW and remove the below XXX Customizing the testing process =============================== writing conftest.py files ----------------------------------- You may put conftest.py files containing project-specific configuration in your project's root directory, it's usually best to put it just into the same directory level as your topmost ``__init__.py``. In fact, ``py.test`` performs an "upwards" search starting from the directory that you specify to be tested and will lookup configuration values right-to-left. You may have options that reside e.g. in your home directory but note that project specific settings will be considered first. There is a flag that helps you debugging your conftest.py configurations:: py.test --traceconfig customizing the collecting and running process ----------------------------------------------- To introduce different test items you can create one or more ``conftest.py`` files in your project. When the collection process traverses directories and modules the default collectors will produce custom Collectors and Items if they are found in a local ``conftest.py`` file. Customizing the collection process in a module ---------------------------------------------- If you have a module where you want to take responsibility for collecting your own test Items and possibly even for executing a test then you can provide `generative tests`_ that yield callables and possibly arguments as a tuple. This is especially useful for calling application test machinery with different parameter sets but counting each of the calls as a separate tests. .. 
_`generative tests`: features.html#generative-tests The other extension possibility is about specifying a custom test ``Item`` class which is responsible for setting up and executing an underlying test. Or you can extend the collection process for a whole directory tree by putting Items in a ``conftest.py`` configuration file. The collection process dynamically consults the *chain of conftest.py* modules to determine collectors and items at ``Directory``, ``Module``, ``Class``, ``Function`` or ``Generator`` level respectively. Customizing execution of Items and Functions ---------------------------------------------------- - ``pytest.Function`` test items control execution of a test function through its ``function.runtest()`` method. This method is responsible for performing setup and teardown ("Test Fixtures") for a test Function. - ``Function.execute(target, *args)`` methods are invoked by the default ``Function.run()`` to actually execute a python function with the given (usually empty set of) arguments. .. _`py-dev mailing list`: http://codespeak.net/mailman/listinfo/py-dev .. _`test generators`: funcargs.html#test-generators .. _`generative tests`: generative tests: yielding parametrized tests ==================================================== Deprecated since 1.0 in favour of `test generators`_. *Generative tests* are test methods that are *generator functions* which ``yield`` callables and their arguments. This is useful for running a test function multiple times against different parameters. Example:: def test_generative(): for x in (42,17,49): yield check, x def check(arg): assert arg % 7 == 0 # second generated tests fails! Note that ``test_generative()`` will cause three tests to get run, notably ``check(42)``, ``check(17)`` and ``check(49)`` of which the middle one will obviously fail. 
To make it easier to distinguish the generated tests it is possible to specify an explicit name for them, like for example:: def test_generative(): for x in (42,17,49): yield "case %d" % x, check, x disabling a test class ---------------------- If you want to disable a complete test class you can set the class-level attribute ``disabled``. For example, in order to avoid running some tests on Win32:: class TestPosixOnly: disabled = sys.platform == 'win32' def test_xxx(self): ... pytest-2.5.1/doc/en/test/index.txt0000664000175000017500000000167512254002202016433 0ustar hpkhpk00000000000000======================================= py.test documentation index ======================================= features_: overview and discussion of features. quickstart_: getting started with writing a simple test. `talks, tutorials, examples`_: tutorial examples, slides funcargs_: powerful parametrized test function setup `plugins`_: list of available plugins with usage examples and feature details. customize_: configuration, customization, extensions changelog_: history of changes covering last releases **Continuous Integration of py.test's own tests and plugins with Hudson**: `http://hudson.testrun.org/view/pytest`_ .. _`http://hudson.testrun.org/view/pytest`: http://hudson.testrun.org/view/pytest/ .. _changelog: ../changelog.html .. _`plugins`: plugin/index.html .. _`talks, tutorials, examples`: talks.html .. _quickstart: quickstart.html .. _features: features.html .. _funcargs: funcargs.html .. _customize: customize.html pytest-2.5.1/doc/en/test/plugin/0000775000175000017500000000000012254002202016050 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/test/plugin/terminal.txt0000664000175000017500000000206612254002202020430 0ustar hpkhpk00000000000000 Implements terminal reporting of the full testing process. ========================================================== .. contents:: :local: This is a good source for looking at the various reporting hooks. 
command line options -------------------- ``-v, --verbose`` increase verbosity. ``-r chars`` show extra test summary info as specified by chars (f)ailed, (s)kipped, (x)failed, (X)passed. ``-l, --showlocals`` show locals in tracebacks (disabled by default). ``--report=opts`` (deprecated, use -r) ``--tb=style`` traceback print mode (long/short/line/no). ``--fulltrace`` don't cut any tracebacks (default is to cut). ``--fixtures`` show available function arguments, sorted by plugin Start improving this plugin in 30 seconds ========================================= 1. Download `pytest_terminal.py`_ plugin source code 2. put it somewhere as ``pytest_terminal.py`` into your import path 3. a subsequent ``py.test`` run will use your local version Checkout customize_, other plugins_ or `get in contact`_. .. include:: links.txt pytest-2.5.1/doc/en/test/plugin/helpconfig.txt0000664000175000017500000000165112254002202020732 0ustar hpkhpk00000000000000 provide version info, conftest/environment config names. ======================================================== .. contents:: :local: command line options -------------------- ``--version`` display py lib version and import information. ``-p name`` early-load given plugin (multi-allowed). ``--traceconfig`` trace considerations of conftest.py files. ``--nomagic`` don't reinterpret asserts, no traceback cutting. ``--debug`` generate and show internal debugging information. ``--help-config`` show available conftest.py and ENV-variable names. Start improving this plugin in 30 seconds ========================================= 1. Download `pytest_helpconfig.py`_ plugin source code 2. put it somewhere as ``pytest_helpconfig.py`` into your import path 3. a subsequent ``py.test`` run will use your local version Checkout customize_, other plugins_ or `get in contact`_. ..
include:: links.txt pytest-2.5.1/doc/en/test/plugin/genscript.txt0000664000175000017500000000122512254002202020607 0ustar hpkhpk00000000000000 generate standalone test script to be distributed along with an application. ============================================================================ .. contents:: :local: command line options -------------------- ``--genscript=path`` create standalone py.test script at given target path. Start improving this plugin in 30 seconds ========================================= 1. Download `pytest_genscript.py`_ plugin source code 2. put it somewhere as ``pytest_genscript.py`` into your import path 3. a subsequent ``py.test`` run will use your local version Checkout customize_, other plugins_ or `get in contact`_. .. include:: links.txt pytest-2.5.1/doc/en/test/plugin/index.txt0000664000175000017500000000410612254002202017721 0ustar hpkhpk00000000000000 advanced python testing ======================= skipping_ advanced skipping for python test functions, classes or modules. mark_ generic mechanism for marking python functions. pdb_ interactive debugging with the Python Debugger. figleaf_ (external) report test coverage using the 'figleaf' package. monkeypatch_ safely patch object attributes, dicts and environment variables. coverage_ (external) Write and report coverage data with the 'coverage' package. cov_ (external) produce code coverage reports using the 'coverage' package, including support for distributed testing. capture_ configurable per-test stdout/stderr capturing mechanisms. capturelog_ (external) capture output of logging module. recwarn_ helpers for asserting deprecation and other warnings. tmpdir_ provide temporary directories to test functions. distributed testing, CI and deployment ====================================== xdist_ (external) loop on failing tests, distribute test runs to CPUs and hosts. pastebin_ submit failure or test session information to a pastebin service. 
junitxml_ logging of test results in JUnit-XML format, for use with Hudson resultlog_ non-xml machine-readable logging of test results. genscript_ generate standalone test script to be distributed along with an application. testing domains and conventions codecheckers ============================================ oejskit_ (external) run javascript tests in real life browsers django_ (external) for testing django applications unittest_ automatically discover and run traditional "unittest.py" style tests. nose_ nose-compatibility plugin: allow to run nose test suites natively. doctest_ collect and execute doctests from modules and test files. restdoc_ perform ReST syntax, local and remote reference tests on .rst/.txt files. internal, debugging, help functionality ======================================= helpconfig_ provide version info, conftest/environment config names. terminal_ Implements terminal reporting of the full testing process. hooklog_ log invocations of extension hooks to a file. .. include:: links.txt pytest-2.5.1/doc/en/test/plugin/figleaf.txt0000664000175000017500000000165212254002202020212 0ustar hpkhpk00000000000000 report test coverage using the 'figleaf' package. ================================================= .. contents:: :local: Install --------------- To install the plugin issue:: easy_install pytest-figleaf # or pip install pytest-figleaf and if you are using pip you can also uninstall:: pip uninstall pytest-figleaf Usage --------------- After installation you can simply type:: py.test --figleaf [...] to enable figleaf coverage in your test run. A default ".figleaf" data file and "html" directory will be created. You can use command line options to control where data and html files are created. command line options -------------------- ``--figleaf`` trace python coverage with figleaf and write HTML for files below the current working dir ``--fig-data=dir`` set tracing file, default: ".figleaf". 
``--fig-html=dir`` set html reporting dir, default "html". .. include:: links.txt pytest-2.5.1/doc/en/test/plugin/nose.txt0000664000175000017500000000255112254002202017560 0ustar hpkhpk00000000000000 nose-compatibility plugin: allow to run nose test suites natively. ================================================================== .. contents:: :local: This is an experimental plugin for allowing to run tests written in 'nosetests' style with py.test. Usage ------------- type:: py.test # instead of 'nosetests' and you should be able to run nose style tests and at the same time can make full use of py.test's capabilities. Supported nose Idioms ---------------------- * setup and teardown at module/class/method level * SkipTest exceptions and markers * setup/teardown decorators * yield-based tests and their setup * general usage of nose utilities Unsupported idioms / issues ---------------------------------- - nose-style doctests are not collected and executed correctly, also fixtures don't work. - no nose-configuration is recognized If you find other issues or have suggestions please run:: py.test --pastebin=all and send the resulting URL to a py.test contact channel, at best to the mailing list. Start improving this plugin in 30 seconds ========================================= 1. Download `pytest_nose.py`_ plugin source code 2. put it somewhere as ``pytest_nose.py`` into your import path 3. a subsequent ``py.test`` run will use your local version Checkout customize_, other plugins_ or `get in contact`_. .. include:: links.txt pytest-2.5.1/doc/en/test/plugin/xdist.txt0000664000175000017500000001312412254002202017745 0ustar hpkhpk00000000000000 loop on failing tests, distribute test runs to CPUs and hosts. ============================================================== .. contents:: :local: The `pytest-xdist`_ plugin extends py.test with some unique test execution modes: * Looponfail: run your tests repeatedly in a subprocess.
After each run py.test waits until a file in your project changes and then re-runs the previously failing tests. This is repeated until all tests pass after which again a full run is performed. * Load-balancing: if you have multiple CPUs or hosts you can use those for a combined test run. This allows to speed up development or to use special resources of remote machines. * Multi-Platform coverage: you can specify different Python interpreters or different platforms and run tests in parallel on all of them. Before running tests remotely, ``py.test`` efficiently synchronizes your program source code to the remote place. All test results are reported back and displayed to your local test session. You may specify different Python versions and interpreters. .. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist Usage examples --------------------- Speed up test runs by sending tests to multiple CPUs +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ To send tests to multiple CPUs, type:: py.test -n NUM Especially for longer running tests or tests requiring a lot of IO this can lead to considerable speed ups. Running tests in a Python subprocess +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ To instantiate a python2.4 sub process and send tests to it, you may type:: py.test -d --tx popen//python=python2.4 This will start a subprocess which is run with the "python2.4" Python interpreter, found in your system binary lookup path. If you prefix the --tx option value like this:: --tx 3*popen//python=python2.4 then three subprocesses would be created and tests will be load-balanced across these three processes. Sending tests to remote SSH accounts +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Suppose you have a package ``mypkg`` which contains some tests that you can successfully run locally. And you have a ssh-reachable machine ``myhost``. 
Then you can ad-hoc distribute your tests by typing:: py.test -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg This will synchronize your ``mypkg`` package directory to an remote ssh account and then locally collect tests and send them to remote places for execution. You can specify multiple ``--rsyncdir`` directories to be sent to the remote side. **NOTE:** For py.test to collect and send tests correctly you not only need to make sure all code and tests directories are rsynced, but that any test (sub) directory also has an ``__init__.py`` file because internally py.test references tests as a fully qualified python module path. **You will otherwise get strange errors** during setup of the remote side. Sending tests to remote Socket Servers +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Download the single-module `socketserver.py`_ Python program and run it like this:: python socketserver.py It will tell you that it starts listening on the default port. You can now on your home machine specify this new socket host with something like this:: py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg .. _`atonce`: Running tests on many platforms at once +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ The basic command to run tests on multiple platforms is:: py.test --dist=each --tx=spec1 --tx=spec2 If you specify a windows host, an OSX host and a Linux environment this command will send each tests to all platforms - and report back failures from all platforms at once. The specifications strings use the `xspec syntax`_. .. _`xspec syntax`: http://codespeak.net/execnet/trunk/basics.html#xspec .. _`socketserver.py`: http://codespeak.net/svn/py/dist/py/execnet/script/socketserver.py .. 
_`execnet`: http://codespeak.net/execnet Specifying test exec environments in a conftest.py +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Instead of specifying command line options, you can put options values in a ``conftest.py`` file like this:: option_tx = ['ssh=myhost//python=python2.5', 'popen//python=python2.5'] option_dist = True Any commandline ``--tx`` specifications will add to the list of available execution environments. Specifying "rsync" dirs in a conftest.py +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ In your ``mypkg/conftest.py`` you may specify directories to synchronise or to exclude:: rsyncdirs = ['.', '../plugins'] rsyncignore = ['_cache'] These directory specifications are relative to the directory where the ``conftest.py`` is found. command line options -------------------- ``-f, --looponfail`` run tests in subprocess, wait for modified files and re-run failing test set until all pass. ``-n numprocesses`` shortcut for '--dist=load --tx=NUM*popen' ``--boxed`` box each test run in a separate process (unix) ``--dist=distmode`` set mode for distributing tests to exec environments. each: send each test to each available environment. load: send each test to available environment. (default) no: run tests inprocess, don't distribute. ``--tx=xspec`` add a test execution environment. some examples: --tx popen//python=python2.5 --tx socket=192.168.1.102:8888 --tx ssh=user@codespeak.net//chdir=testcache ``-d`` load-balance tests. shortcut for '--dist=load' ``--rsyncdir=dir1`` add directory for rsyncing to remote tx nodes. .. include:: links.txt pytest-2.5.1/doc/en/test/plugin/links.txt0000664000175000017500000000520412254002202017732 0ustar hpkhpk00000000000000.. _`helpconfig`: helpconfig.html .. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_recwarn.py .. _`unittest`: unittest.html .. 
_`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_monkeypatch.py .. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_genscript.py .. _`pastebin`: pastebin.html .. _`skipping`: skipping.html .. _`genscript`: genscript.html .. _`plugins`: index.html .. _`mark`: mark.html .. _`tmpdir`: tmpdir.html .. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_doctest.py .. _`capture`: capture.html .. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_nose.py .. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html .. _`xdist`: xdist.html .. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_pastebin.py .. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_tmpdir.py .. _`terminal`: terminal.html .. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_hooklog.py .. _`capturelog`: capturelog.html .. _`junitxml`: junitxml.html .. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_skipping.py .. _`checkout the py.test development version`: ../../install.html#checkout .. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_helpconfig.py .. _`oejskit`: oejskit.html .. _`doctest`: doctest.html .. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_mark.py .. _`get in contact`: ../../contact.html .. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_capture.py .. _`figleaf`: figleaf.html .. _`customize`: ../customize.html .. _`hooklog`: hooklog.html .. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_terminal.py .. _`recwarn`: recwarn.html .. 
_`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_pdb.py .. _`monkeypatch`: monkeypatch.html .. _`coverage`: coverage.html .. _`resultlog`: resultlog.html .. _`cov`: cov.html .. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_junitxml.py .. _`django`: django.html .. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_unittest.py .. _`nose`: nose.html .. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_resultlog.py .. _`pdb`: pdb.html pytest-2.5.1/doc/en/test/plugin/django.txt0000664000175000017500000000050312254002202020051 0ustar hpkhpk00000000000000pytest_django plugin (EXTERNAL) ========================================== pytest_django is a plugin for py.test that provides a set of useful tools for testing Django applications, checkout Ben Firshman's `pytest_django github page`_. .. _`pytest_django github page`: http://github.com/bfirsh/pytest_django/tree/master pytest-2.5.1/doc/en/test/plugin/oejskit.txt0000664000175000017500000000153412254002202020264 0ustar hpkhpk00000000000000pytest_oejskit plugin (EXTERNAL) ========================================== The `oejskit`_ offers a py.test plugin for running Javascript tests in live browsers. Running inside the browsers comes with some speed cost, on the other hand it means for example the code is tested against the real-world DOM implementations. The approach enables to write integration tests such that the JavaScript code is tested against server-side Python code mocked as necessary. Any server-side framework that can already be exposed through WSGI (or for which a subset of WSGI can be written to accommodate the jskit own needs) can play along. For more info and download please visit the `oejskit PyPI`_ page. .. _`oejskit`: .. _`oejskit PyPI`: http://pypi.python.org/pypi/oejskit ..
source link 'http://bitbucket.org/pedronis/js-infrastructure/src/tip/pytest_jstests.py', pytest-2.5.1/doc/en/test/plugin/cov.txt0000664000175000017500000001726512254002202017413 0ustar hpkhpk00000000000000 produce code coverage reports using the 'coverage' package, including support for distributed testing. ====================================================================================================== .. contents:: :local: This plugin produces coverage reports. It supports centralised testing and distributed testing in both load and each modes. It also supports coverage of subprocesses. All features offered by the coverage package should be available, either through pytest-cov or through coverage's config file. Installation ------------ The `pytest-cov`_ package may be installed with pip or easy_install:: pip install pytest-cov easy_install pytest-cov .. _`pytest-cov`: http://pypi.python.org/pypi/pytest-cov/ Uninstallation -------------- Uninstalling packages is supported by pip:: pip uninstall pytest-cov However easy_install does not provide an uninstall facility. .. IMPORTANT:: Ensure that you manually delete the init_covmain.pth file in your site-packages directory. This file starts coverage collection of subprocesses if appropriate during site initialization at python startup. Usage ----- Centralised Testing ~~~~~~~~~~~~~~~~~~~ Centralised testing will report on the combined coverage of the main process and all of its subprocesses. Running centralised testing:: py.test --cov myproj tests/ Shows a terminal report:: -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- Name Stmts Miss Cover ---------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% myproj/feature4286 94 7 92% ---------------------------------------- TOTAL 353 20 94% Distributed Testing: Load ~~~~~~~~~~~~~~~~~~~~~~~~~ Distributed testing with dist mode set to load will report on the combined coverage of all slaves.
The slaves may be spread out over any number of hosts and each slave may be located anywhere on the file system. Each slave will have its subprocesses measured. Running distributed testing with dist mode set to load:: py.test --cov myproj -n 2 tests/ Shows a terminal report:: -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- Name Stmts Miss Cover ---------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% myproj/feature4286 94 7 92% ---------------------------------------- TOTAL 353 20 94% Again but spread over different hosts and different directories:: py.test --cov myproj --dist load --tx ssh=memedough@host1//chdir=testenv1 --tx ssh=memedough@host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python --rsyncdir myproj --rsyncdir tests --rsync examples tests/ Shows a terminal report:: -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- Name Stmts Miss Cover ---------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% myproj/feature4286 94 7 92% ---------------------------------------- TOTAL 353 20 94% Distributed Testing: Each ~~~~~~~~~~~~~~~~~~~~~~~~~ Distributed testing with dist mode set to each will report on the combined coverage of all slaves. Since each slave is running all tests this allows generating a combined coverage report for multiple environments.
Running distributed testing with dist mode set to each:: py.test --cov myproj --dist each --tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python --tx ssh=memedough@host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python --rsyncdir myproj --rsyncdir tests --rsync examples tests/ Shows a terminal report:: ---------------------------------------- coverage ---------------------------------------- platform linux2, python 2.6.5-final-0 platform linux2, python 2.7.0-final-0 Name Stmts Miss Cover ---------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% myproj/feature4286 94 7 92% ---------------------------------------- TOTAL 353 20 94% Reporting --------- It is possible to generate any combination of the reports for a single test run. The available reports are terminal (with or without missing line numbers shown), HTML, XML and annotated source code. The terminal report without line numbers (default):: py.test --cov-report term --cov myproj tests/ -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- Name Stmts Miss Cover ---------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% myproj/feature4286 94 7 92% ---------------------------------------- TOTAL 353 20 94% The terminal report with line numbers:: py.test --cov-report term-missing --cov myproj tests/ -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- Name Stmts Miss Cover Missing -------------------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% 24-26, 99, 149, 233-236, 297-298, 369-370 myproj/feature4286 94 7 92% 183-188, 197 -------------------------------------------------- TOTAL 353 20 94% The remaining three reports output to files without showing anything on the terminal (useful for when the output is going to a continuous integration server):: py.test --cov-report html --cov-report xml --cov-report annotate --cov myproj 
tests/ Coverage Data File ------------------ The data file is erased at the beginning of testing to ensure clean data for each test run. The data file is left at the end of testing so that it is possible to use normal coverage tools to examine it. Limitations ----------- For distributed testing the slaves must have the pytest-cov package installed. This is needed since the plugin must be registered through setuptools / distribute for pytest to start the plugin on the slave. For subprocess measurement environment variables must make it from the main process to the subprocess. The python used by the subprocess must have pytest-cov installed. The subprocess must do normal site initialization so that the environment variables can be detected and coverage started. Acknowledgments ---------------- Holger Krekel for pytest with its distributed testing support. Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs. Whilst this plugin has been built fresh from the ground up to support distributed testing it has been influenced by the work done on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and nose-cover (Jason Pellerin) which are other coverage plugins for pytest and nose respectively. No doubt others have contributed to these tools as well. command line options -------------------- ``--cov=path`` measure coverage for filesystem path (multi-allowed) ``--cov-report=type`` type of report to generate: term, term-missing, annotate, html, xml (multi-allowed) ``--cov-config=path`` config file for coverage, default: .coveragerc .. include:: links.txt pytest-2.5.1/doc/en/test/plugin/coverage.txt0000664000175000017500000000245412254002202020411 0ustar hpkhpk00000000000000 Write and report coverage data with the 'coverage' package. =========================================================== .. contents:: :local: Note: Original code by Ross Lawley. 
Install -------------- Use pip to (un)install:: pip install pytest-coverage pip uninstall pytest-coverage or alternatively use easy_install to install:: easy_install pytest-coverage Usage ------------- To get full test coverage reports for a particular package type:: py.test --cover-report=report command line options -------------------- ``--cover=COVERPACKAGES`` (multi allowed) only include info from specified package. ``--cover-report=REPORT_TYPE`` html: Directory for html output. report: Output a text report. annotate: Annotate your source code for which lines were executed and which were not. xml: Output an xml report compatible with the cobertura plugin for hudson. ``--cover-directory=DIRECTORY`` Directory for the reports (html / annotate results) defaults to ./coverage ``--cover-xml-file=XML_FILE`` File for the xml report defaults to ./coverage.xml ``--cover-show-missing`` Show missing files ``--cover-ignore-errors=IGNORE_ERRORS`` Ignore errors of finding source files for code. .. include:: links.txt pytest-2.5.1/doc/en/test/test.html0000664000175000017500000000103112254002202016412 0ustar hpkhpk00000000000000 pytest-2.5.1/doc/en/test/extend.html0000664000175000017500000000103512254002202016726 0ustar hpkhpk00000000000000 pytest-2.5.1/doc/en/test/mission.txt0000664000175000017500000000076112254002202017000 0ustar hpkhpk00000000000000 Mission ==================================== py.test strives to make testing a fun and no-boilerplate effort. The tool is distributed as part of the `py` package which contains supporting APIs that are also usable independently. 
The project independent ``py.test`` command line tool helps you to: * rapidly collect and run tests * run unit- or doctests, functional or integration tests * distribute tests to multiple environments * use local or global plugins for custom test types and setup pytest-2.5.1/doc/en/test/config.html0000664000175000017500000000103512254002202016704 0ustar hpkhpk00000000000000 pytest-2.5.1/doc/en/test/dist.html0000664000175000017500000000104012254002202016376 0ustar hpkhpk00000000000000 pytest-2.5.1/doc/en/overview.txt0000664000175000017500000000037612254002202016210 0ustar hpkhpk00000000000000================================================== Getting started basics ================================================== .. toctree:: :maxdepth: 2 index.txt getting-started.txt usage.txt goodpractises.txt projects.txt faq.txt pytest-2.5.1/doc/en/nose.txt0000664000175000017500000000367512254002202015313 0ustar hpkhpk00000000000000Running tests written for nose ======================================= .. include:: links.inc py.test has basic support for running tests written for nose_. .. _nosestyle: Usage ------------- After :ref:`installation` type:: python setup.py develop # make sure tests can import our package py.test # instead of 'nosetests' and you should be able to run your nose style tests and make use of py.test's capabilities. Supported nose Idioms ---------------------- * setup and teardown at module/class/method level * SkipTest exceptions and markers * setup/teardown decorators * yield-based tests and their setup * general usage of nose utilities Unsupported idioms / known issues ---------------------------------- - unittest-style ``setUp, tearDown, setUpClass, tearDownClass`` are recognized only on ``unittest.TestCase`` classes but not on plain classes. ``nose`` supports these methods also on plain classes but pytest deliberately does not. 
As nose and pytest already both support ``setup_class, teardown_class, setup_method, teardown_method`` it doesn't seem useful to duplicate the unittest-API like nose does. If you however rather think pytest should support the unittest-spelling on plain classes please post `to this issue `_. - nose imports test modules with the same import path (e.g. ``tests.test_mod``) but different file system paths (e.g. ``tests/test_mod.py`` and ``other/tests/test_mod.py``) by extending sys.path/import semantics. pytest does not do that but there is discussion in `issue268 `_ for adding some support. Note that `nose2 chose to avoid this sys.path/import hackery `_. - nose-style doctests are not collected and executed correctly, also doctest fixtures don't work. - no nose-configuration is recognized pytest-2.5.1/doc/en/getting-started.txt0000664000175000017500000001663612254002202017445 0ustar hpkhpk00000000000000Installation and Getting Started =================================== **Pythons**: Python 2.5-3.3, Jython, PyPy **Platforms**: Unix/Posix and Windows **PyPI package name**: `pytest `_ **documentation as PDF**: `download latest `_ .. _`getstarted`: .. _installation: Installation ---------------------------------------- Installation options:: pip install -U pytest # or easy_install -U pytest To check your installation has installed the correct version:: $ py.test --version This is py.test version 2.5.1, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc If you get an error check out :ref:`installation issues`. .. _`simpletest`: Our first test run ---------------------------------------------------------- Let's create a first test file with a simple test function:: # content of test_sample.py def func(x): return x + 1 def test_answer(): assert func(3) == 5 That's it. 
You can execute the test function now:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 1 items test_sample.py F ================================= FAILURES ================================= _______________________________ test_answer ________________________________ def test_answer(): > assert func(3) == 5 E assert 4 == 5 E + where 4 = func(3) test_sample.py:5: AssertionError ========================= 1 failed in 0.01 seconds ========================= py.test found the ``test_answer`` function by following :ref:`standard test discovery rules `, basically detecting the ``test_`` prefixes. We got a failure report because our little ``func(3)`` call did not return ``5``. .. note:: You can simply use the ``assert`` statement for asserting test expectations. pytest's :ref:`assert introspection` will intelligently report intermediate values of the assert expression freeing you from the need to learn the many names of `JUnit legacy methods`_. .. _`JUnit legacy methods`: http://docs.python.org/library/unittest.html#test-cases .. _`assert statement`: http://docs.python.org/reference/simple_stmts.html#the-assert-statement Asserting that a certain exception is raised -------------------------------------------------------------- If you want to assert that some code raises an exception you can use the ``raises`` helper:: # content of test_sysexit.py import pytest def f(): raise SystemExit(1) def test_mytest(): with pytest.raises(SystemExit): f() Running it with, this time in "quiet" reporting mode:: $ py.test -q test_sysexit.py . 1 passed in 0.00 seconds .. todo:: For further ways to assert exceptions see the `raises` Grouping multiple tests in a class -------------------------------------------------------------- Once you start to have more than a few tests it often makes sense to group tests logically, in classes and modules. 
Let's write a class containing two tests:: # content of test_class.py class TestClass: def test_one(self): x = "this" assert 'h' in x def test_two(self): x = "hello" assert hasattr(x, 'check') The two tests are found because of the standard :ref:`test discovery`. There is no need to subclass anything. We can simply run the module by passing its filename:: $ py.test -q test_class.py .F ================================= FAILURES ================================= ____________________________ TestClass.test_two ____________________________ self = def test_two(self): x = "hello" > assert hasattr(x, 'check') E assert hasattr('hello', 'check') test_class.py:8: AssertionError 1 failed, 1 passed in 0.01 seconds The first test passed, the second failed. Again we can easily see the intermediate values used in the assertion, helping us to understand the reason for the failure. Going functional: requesting a unique temporary directory -------------------------------------------------------------- For functional tests one often needs to create some files and pass them to application objects. pytest provides :ref:`builtinfixtures` which allow to request arbitrary resources, for example a unique temporary directory:: # content of test_tmpdir.py def test_needsfiles(tmpdir): print tmpdir assert 0 We list the name ``tmpdir`` in the test function signature and py.test will lookup and call a fixture factory to create the resource before performing the test function call. 
Let's just run it:: $ py.test -q test_tmpdir.py F ================================= FAILURES ================================= _____________________________ test_needsfiles ______________________________ tmpdir = local('/tmp/pytest-38/test_needsfiles0') def test_needsfiles(tmpdir): print tmpdir > assert 0 E assert 0 test_tmpdir.py:3: AssertionError ----------------------------- Captured stdout ------------------------------ /tmp/pytest-38/test_needsfiles0 1 failed in 0.04 seconds Before the test runs, a unique-per-test-invocation temporary directory was created. More info at :ref:`tmpdir handling`. You can find out what kind of builtin :ref:`fixtures` exist by typing:: py.test --fixtures # shows builtin and custom fixtures Where to go next ------------------------------------- Here are a few suggestions where to go next: * :ref:`cmdline` for command line invocation examples * :ref:`good practises ` for virtualenv, test layout, genscript support * :ref:`fixtures` for providing a functional baseline to your tests * :ref:`apiref` for documentation and examples on using py.test * :ref:`plugins` managing and writing plugins .. _`installation issues`: Known Installation issues ------------------------------ easy_install or pip not found? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. _`install pip`: http://www.pip-installer.org/en/latest/index.html `Install pip`_ for a state of the art python package installer. Install `setuptools`_ to get ``easy_install`` which allows to install ``.egg`` binary format packages in addition to source-based ones. py.test not found on Windows despite installation? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. _`Python for Windows`: http://www.imladris.com/Scripts/PythonForWindows.html - **Windows**: If "easy_install" or "py.test" are not found you need to add the Python script path to your ``PATH``, see here: `Python for Windows`_. 
You may alternatively use an `ActivePython install`_ which does this for you automatically. .. _`ActivePython install`: http://www.activestate.com/activepython/downloads .. _`Jython does not create command line launchers`: http://bugs.jython.org/issue1491 - **Jython2.5.1 on Windows XP**: `Jython does not create command line launchers`_ so ``py.test`` will not work correctly. You may install py.test on CPython and type ``py.test --genscript=mytest`` and then use ``jython mytest`` to run py.test for your tests to run with Jython. :ref:`examples` for more complex examples .. include:: links.inc pytest-2.5.1/doc/en/xdist.txt0000664000175000017500000001427312254002202015476 0ustar hpkhpk00000000000000 .. _`xdist`: xdist: pytest distributed testing plugin =============================================================== The `pytest-xdist`_ plugin extends py.test with some unique test execution modes: * Looponfail: run your tests repeatedly in a subprocess. After each run, py.test waits until a file in your project changes and then re-runs the previously failing tests. This is repeated until all tests pass. At this point a full run is again performed. * multiprocess Load-balancing: if you have multiple CPUs or hosts you can use them for a combined test run. This allows to speed up development or to use special resources of remote machines. * Multi-Platform coverage: you can specify different Python interpreters or different platforms and run tests in parallel on all of them. Before running tests remotely, ``py.test`` efficiently "rsyncs" your program source code to the remote place. All test results are reported back and displayed to your local terminal. You may specify different Python versions and interpreters. 
Installation of xdist plugin ------------------------------ Install the plugin with:: easy_install pytest-xdist # or pip install pytest-xdist or use the package in develop/in-place mode with a checkout of the `pytest-xdist repository`_ :: python setup.py develop Usage examples --------------------- .. _`xdistcpu`: Speed up test runs by sending tests to multiple CPUs +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ To send tests to multiple CPUs, type:: py.test -n NUM Especially for longer running tests or tests requiring a lot of I/O this can lead to considerable speed ups. Running tests in a Python subprocess +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ To instantiate a Python-2.4 subprocess and send tests to it, you may type:: py.test -d --tx popen//python=python2.4 This will start a subprocess which is run with the "python2.4" Python interpreter, found in your system binary lookup path. If you prefix the --tx option value like this:: py.test -d --tx 3*popen//python=python2.4 then three subprocesses would be created and the tests will be distributed to three subprocesses and run simultaneously. .. _looponfailing: Running tests in looponfailing mode +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ For refactoring a project with a medium or large test suite you can use the looponfailing mode. Simply add the ``-f`` option:: py.test -f and py.test will run your tests. Assuming you have failures it will then wait for file changes and re-run the failing test set. File changes are detected by looking at ``looponfailroots`` root directories and all of their contents (recursively). If the default for this value does not work for you, you can change it in your project by setting a configuration option:: # content of a pytest.ini, setup.cfg or tox.ini file [pytest] looponfailroots = mypkg testdir This would lead to only looking for file changes in the respective directories, specified relatively to the ini-file's directory. 
Sending tests to remote SSH accounts +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Suppose you have a package ``mypkg`` which contains some tests that you can successfully run locally. And you also have a ssh-reachable machine ``myhost``. Then you can ad-hoc distribute your tests by typing:: py.test -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg This will synchronize your ``mypkg`` package directory with a remote ssh account and then collect and run your tests at the remote side. You can specify multiple ``--rsyncdir`` directories to be sent to the remote side. .. XXX CHECK **NOTE:** For py.test to collect and send tests correctly you not only need to make sure all code and tests directories are rsynced, but that any test (sub) directory also has an ``__init__.py`` file because internally py.test references tests as a fully qualified python module path. **You will otherwise get strange errors** during setup of the remote side. Sending tests to remote Socket Servers +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Download the single-module `socketserver.py`_ Python program and run it like this:: python socketserver.py It will tell you that it starts listening on the default port. You can now on your home machine specify this new socket host with something like this:: py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg .. _`atonce`: Running tests on many platforms at once +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ The basic command to run tests on multiple platforms is:: py.test --dist=each --tx=spec1 --tx=spec2 If you specify a windows host, an OSX host and a Linux environment this command will send each tests to all platforms - and report back failures from all platforms at once. The specifications strings use the `xspec syntax`_. .. _`xspec syntax`: http://codespeak.net/execnet/basics.html#xspec .. _`socketserver.py`: http://bitbucket.org/hpk42/execnet/raw/2af991418160/execnet/script/socketserver.py .. 
_`execnet`: http://codespeak.net/execnet Specifying test exec environments in an ini file +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ pytest (since version 2.0) supports ini-style configuration. For example, you could make running with three subprocesses your default:: [pytest] addopts = -n3 You can also add default environments like this:: [pytest] addopts = --tx ssh=myhost//python=python2.5 --tx ssh=myhost//python=python2.6 and then just type:: py.test --dist=each to run tests in each of the environments. Specifying "rsync" dirs in an ini-file +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ In a ``tox.ini`` or ``setup.cfg`` file in your root project directory you may specify directories to include or to exclude in synchronisation:: [pytest] rsyncdirs = . mypkg helperpkg rsyncignore = .hg These directory specifications are relative to the directory where the configuration file was found. .. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist .. _`pytest-xdist repository`: http://bitbucket.org/hpk42/pytest-xdist .. _`pytest`: http://pytest.org pytest-2.5.1/doc/en/develop.txt0000664000175000017500000000202612254002202015772 0ustar hpkhpk00000000000000================================================= Feedback and contribute to py.test ================================================= .. toctree:: :maxdepth: 2 contact.txt .. 
_checkout: Working from version control or a tarball ================================================= To follow development or start experiments, checkout the complete code and documentation source with mercurial_:: hg clone https://bitbucket.org/hpk42/pytest/ You can also go to the python package index and download and unpack a TAR file:: http://pypi.python.org/pypi/pytest/ Activating a checkout with setuptools -------------------------------------------- With a working Distribute_ or setuptools_ installation you can type:: python setup.py develop in order to work inline with the tools and the lib of your checkout. If this command complains that it could not find the required version of "py" then you need to use the development pypi repository:: python setup.py develop -i http://pypi.testrun.org .. include:: links.inc pytest-2.5.1/doc/en/mark.txt0000664000175000017500000000164512254002202015274 0ustar hpkhpk00000000000000 .. _mark: Marking test functions with attributes ================================================================= .. currentmodule:: _pytest.mark By using the ``pytest.mark`` helper you can easily set metadata on your test functions. There are some builtin markers, for example: * :ref:`skipif ` - skip a test function if a certain condition is met * :ref:`xfail ` - produce an "expected failure" outcome if a certain condition is met * :ref:`parametrize ` to perform multiple calls to the same test function. It's easy to create custom markers or to apply markers to whole test classes or modules. See :ref:`mark examples` for examples which also serve as documentation. API reference for mark related objects ------------------------------------------------ .. autoclass:: MarkGenerator :members: .. autoclass:: MarkDecorator :members: .. autoclass:: MarkInfo :members: pytest-2.5.1/doc/en/parametrize.txt0000664000175000017500000001706012254002202016663 0ustar hpkhpk00000000000000 .. _`test generators`: .. _`parametrizing-tests`: .. 
_`parametrized test functions`: .. _`parametrize`: .. _`parametrize-basics`: Parametrizing fixtures and test functions ========================================================================== pytest supports test parametrization in several well-integrated ways: - :py:func:`pytest.fixture` allows to define :ref:`parametrization at the level of fixture functions `. * `@pytest.mark.parametrize`_ allows to define parametrization at the function or class level, provides multiple argument/fixture sets for a particular test function or class. * `pytest_generate_tests`_ enables implementing your own custom dynamic parametrization scheme or extensions. .. _parametrizemark: .. _`@pytest.mark.parametrize`: ``@pytest.mark.parametrize``: parametrizing test functions --------------------------------------------------------------------- .. regendoc: wipe .. versionadded:: 2.2, improved in 2.4 The builtin ``pytest.mark.parametrize`` decorator enables parametrization of arguments for a test function. Here is a typical example of a test function that implements checking that a certain input leads to an expected output:: # content of test_expectation.py import pytest @pytest.mark.parametrize("input,expected", [ ("3+5", 8), ("2+4", 6), ("6*9", 42), ]) def test_eval(input, expected): assert eval(input) == expected Here, the ``@parametrize`` decorator defines three different ``(input,output)`` tuples so that the ``test_eval`` function will run three times using them in turn:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 3 items test_expectation.py ..F ================================= FAILURES ================================= ____________________________ test_eval[6*9-42] _____________________________ input = '6*9', expected = 42 @pytest.mark.parametrize("input,expected", [ ("3+5", 8), ("2+4", 6), ("6*9", 42), ]) def test_eval(input, expected): > assert eval(input) == expected E 
assert 54 == 42 E + where 54 = eval('6*9') test_expectation.py:8: AssertionError ==================== 1 failed, 2 passed in 0.01 seconds ==================== As designed in this example, only one pair of input/output values fails the simple test function. And as usual with test function arguments, you can see the ``input`` and ``output`` values in the traceback. Note that you could also use the parametrize marker on a class or a module (see :ref:`mark`) which would invoke several functions with the argument sets. It is also possible to mark individual test instances within parametrize, for example with the builtin ``mark.xfail``:: # content of test_expectation.py import pytest @pytest.mark.parametrize("input,expected", [ ("3+5", 8), ("2+4", 6), pytest.mark.xfail(("6*9", 42)), ]) def test_eval(input, expected): assert eval(input) == expected Let's run this:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 3 items test_expectation.py ..x =================== 2 passed, 1 xfailed in 0.01 seconds ==================== The one parameter set which caused a failure previously now shows up as an "xfailed (expected to fail)" test. .. note:: In versions prior to 2.4 one needed to specify the argument names as a tuple. This remains valid but the simpler ``"name1,name2,..."`` comma-separated-string syntax is now advertised first because it's easier to write and produces less line noise. .. _`pytest_generate_tests`: Basic ``pytest_generate_tests`` example --------------------------------------------- Sometimes you may want to implement your own parametrization scheme or implement some dynamism for determining the parameters or scope of a fixture. For this, you can use the ``pytest_generate_tests`` hook which is called when collecting a test function. 
Through the passed in `metafunc` object you can inspect the requesting test context and, most importantly, you can call ``metafunc.parametrize()`` to cause parametrization. For example, let's say we want to run a test taking string inputs which we want to set via a new py.test command line option. Let's first write a simple test accepting a ``stringinput`` fixture function argument:: # content of test_strings.py def test_valid_string(stringinput): assert stringinput.isalpha() Now we add a ``conftest.py`` file containing the addition of a command line option and the parametrization of our test function:: # content of conftest.py def pytest_addoption(parser): parser.addoption("--stringinput", action="append", default=[], help="list of stringinputs to pass to test functions") def pytest_generate_tests(metafunc): if 'stringinput' in metafunc.fixturenames: metafunc.parametrize("stringinput", metafunc.config.option.stringinput) If we now pass two stringinput values, our test will run twice:: $ py.test -q --stringinput="hello" --stringinput="world" test_strings.py .. 2 passed in 0.01 seconds Let's also run with a stringinput that will lead to a failing test:: $ py.test -q --stringinput="!" test_strings.py F ================================= FAILURES ================================= ___________________________ test_valid_string[!] ___________________________ stringinput = '!' def test_valid_string(stringinput): > assert stringinput.isalpha() E assert () E + where = '!'.isalpha test_strings.py:3: AssertionError 1 failed in 0.01 seconds As expected our test function fails. 
If you don't specify a stringinput it will be skipped because ``metafunc.parametrize()`` will be called with an empty parameter list:: $ py.test -q -rs test_strings.py s ========================= short test summary info ========================== SKIP [1] /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:1094: got empty parameter set, function test_valid_string at /tmp/doc-exec-24/test_strings.py:1 1 skipped in 0.01 seconds For further examples, you might want to look at :ref:`more parametrization examples `. .. _`metafunc object`: The **metafunc** object ------------------------------------------- .. currentmodule:: _pytest.python metafunc objects are passed to the ``pytest_generate_tests`` hook. They help to inspect a testfunction and to generate tests according to test configuration or values specified in the class or module where a test function is defined: ``metafunc.fixturenames``: set of required function arguments for given function ``metafunc.function``: underlying python test function ``metafunc.cls``: class object where the test function is defined in or None. ``metafunc.module``: the module object where the test function is defined in. ``metafunc.config``: access to command line opts and general config ``metafunc.funcargnames``: alias for ``fixturenames``, for pre-2.3 compatibility .. automethod:: Metafunc.parametrize .. automethod:: Metafunc.addcall(funcargs=None,id=_notexists,param=_notexists) pytest-2.5.1/doc/en/builtin.txt0000664000175000017500000000734712254002202016013 0ustar hpkhpk00000000000000 .. _`pytest helpers`: Pytest API and builtin fixtures ================================================ This is a list of ``pytest.*`` API functions and fixtures. For information on plugin hooks and objects, see :ref:`plugins`. For information on the ``pytest.mark`` mechanism, see :ref:`mark`. For the below objects, you can also interactively ask for help, e.g. 
by typing on the Python interactive prompt something like:: import pytest help(pytest) .. currentmodule:: pytest Invoking pytest interactively --------------------------------------------------- .. autofunction:: main More examples at :ref:`pytest.main-usage` Helpers for assertions about Exceptions/Warnings -------------------------------------------------------- .. autofunction:: raises Examples at :ref:`assertraises`. .. autofunction:: deprecated_call Raising a specific test outcome -------------------------------------- You can use the following functions in your test, fixture or setup functions to force a certain test outcome. Note that most often you can rather use declarative marks, see :ref:`skipping`. .. autofunction:: _pytest.runner.fail .. autofunction:: _pytest.runner.skip .. autofunction:: _pytest.runner.importorskip .. autofunction:: _pytest.skipping.xfail .. autofunction:: _pytest.runner.exit fixtures and requests ----------------------------------------------------- To mark a fixture function: .. autofunction:: _pytest.python.fixture Tutorial at :ref:`fixtures`. The ``request`` object that can be used from fixture functions. .. autoclass:: _pytest.python.FixtureRequest() :members: .. _builtinfixtures: .. _builtinfuncargs: Builtin fixtures/function arguments ----------------------------------------- You can ask for available builtin or project-custom :ref:`fixtures ` by typing:: $ py.test -q --fixtures capsys enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. capfd enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capfd.readouterr()`` method calls which return a ``(out, err)`` tuple. 
monkeypatch The returned ``monkeypatch`` funcarg provides these helper methods to modify objects, dictionaries or os.environ:: monkeypatch.setattr(obj, name, value, raising=True) monkeypatch.delattr(obj, name, raising=True) monkeypatch.setitem(mapping, name, value) monkeypatch.delitem(obj, name, raising=True) monkeypatch.setenv(name, value, prepend=False) monkeypatch.delenv(name, value, raising=True) monkeypatch.syspath_prepend(path) monkeypatch.chdir(path) All modifications will be undone after the requesting test function has finished. The ``raising`` parameter determines if a KeyError or AttributeError will be raised if the set/deletion operation has no target. pytestconfig the pytest config object with access to command line opts. recwarn Return a WarningsRecorder instance that provides these methods: * ``pop(category=None)``: return last warning matching the category. * ``clear()``: clear list of warnings See http://docs.python.org/library/warnings.html for information on warning categories. tmpdir return a temporary directory path object which is unique to each test function invocation, created as a sub directory of the base temporary directory. The returned object is a `py.path.local`_ path object. in 0.00 seconds pytest-2.5.1/doc/en/unittest.txt0000664000175000017500000001643712254002202016226 0ustar hpkhpk00000000000000 .. _`unittest.TestCase`: Support for unittest.TestCase / Integration of fixtures ===================================================================== .. _`unittest.py style`: http://docs.python.org/library/unittest.html py.test has support for running Python `unittest.py style`_ tests. It's meant for leveraging existing unittest-style projects to use pytest features. Concretely, pytest will automatically collect ``unittest.TestCase`` subclasses and their ``test`` methods in test files. It will invoke typical setup/teardown methods and generally try to make test suites written to run on unittest, to also run using ``py.test``. 
We assume here that you are familiar with writing ``unittest.TestCase`` style tests and rather focus on integration aspects. Usage ------------------------------------------------------------------- After :ref:`installation` type:: py.test and you should be able to run your unittest-style tests if they are contained in ``test_*`` modules. If that works for you then you can make use of most :ref:`pytest features `, for example ``--pdb`` debugging in failures, using :ref:`plain assert-statements `, :ref:`more informative tracebacks `, stdout-capturing or distributing tests to multiple CPUs via the ``-nNUM`` option if you installed the ``pytest-xdist`` plugin. Please refer to the general pytest documentation for many more examples. Mixing pytest fixtures into unittest.TestCase style tests ----------------------------------------------------------- Running your unittest with ``py.test`` allows you to use its :ref:`fixture mechanism ` with ``unittest.TestCase`` style tests. Assuming you have at least skimmed the pytest fixture features, let's jump-start into an example that integrates a pytest ``db_class`` fixture, setting up a class-cached database object, and then reference it from a unittest-style test:: # content of conftest.py # we define a fixture function below and it will be "used" by # referencing its name from tests import pytest @pytest.fixture(scope="class") def db_class(request): class DummyDB: pass # set a class attribute on the invoking test context request.cls.db = DummyDB() This defines a fixture function ``db_class`` which - if used - is called once for each test class and which sets the class-level ``db`` attribute to a ``DummyDB`` instance. The fixture function achieves this by receiving a special ``request`` object which gives access to :ref:`the requesting test context ` such as the ``cls`` attribute, denoting the class from which the fixture is used. 
This architecture de-couples fixture writing from actual test code and allows re-use of the fixture by a minimal reference, the fixture name. So let's write an actual ``unittest.TestCase`` class using our fixture definition:: # content of test_unittest_db.py import unittest import pytest @pytest.mark.usefixtures("db_class") class MyTest(unittest.TestCase): def test_method1(self): assert hasattr(self, "db") assert 0, self.db # fail for demo purposes def test_method2(self): assert 0, self.db # fail for demo purposes The ``@pytest.mark.usefixtures("db_class")`` class-decorator makes sure that the pytest fixture function ``db_class`` is called once per class. Due to the deliberately failing assert statements, we can take a look at the ``self.db`` values in the traceback:: $ py.test test_unittest_db.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 2 items test_unittest_db.py FF ================================= FAILURES ================================= ___________________________ MyTest.test_method1 ____________________________ self = def test_method1(self): assert hasattr(self, "db") > assert 0, self.db # fail for demo purposes E AssertionError: test_unittest_db.py:9: AssertionError ___________________________ MyTest.test_method2 ____________________________ self = def test_method2(self): > assert 0, self.db # fail for demo purposes E AssertionError: test_unittest_db.py:12: AssertionError ========================= 2 failed in 0.01 seconds ========================= This default pytest traceback shows that the two test methods share the same ``self.db`` instance which was our intention when writing the class-scoped fixture function above. 
autouse fixtures and accessing other fixtures ------------------------------------------------------------------- Although it's usually better to explicitely declare use of fixtures you need for a given test, you may sometimes want to have fixtures that are automatically used in a given context. After all, the traditional style of unittest-setup mandates the use of this implicit fixture writing and chances are, you are used to it or like it. You can flag fixture functions with ``@pytest.fixture(autouse=True)`` and define the fixture function in the context where you want it used. Let's look at an ``initdir`` fixture which makes all test methods of a ``TestCase`` class execute in a temporary directory with a pre-initialized ``samplefile.ini``. Our ``initdir`` fixture itself uses the pytest builtin :ref:`tmpdir ` fixture to delegate the creation of a per-test temporary directory:: # content of test_unittest_cleandir.py import pytest import unittest class MyTest(unittest.TestCase): @pytest.fixture(autouse=True) def initdir(self, tmpdir): tmpdir.chdir() # change to pytest-provided temporary directory tmpdir.join("samplefile.ini").write("# testdata") def test_method(self): s = open("samplefile.ini").read() assert "testdata" in s Due to the ``autouse`` flag the ``initdir`` fixture function will be used for all methods of the class where it is defined. This is a shortcut for using a ``@pytest.mark.usefixtures("initdir")`` marker on the class like in the previous example. Running this test module ...:: $ py.test -q test_unittest_cleandir.py . 1 passed in 0.01 seconds ... gives us one passed test because the ``initdir`` fixture function was executed ahead of the ``test_method``. .. 
note:: While pytest supports receiving fixtures via :ref:`test function arguments ` for non-unittest test methods, ``unittest.TestCase`` methods cannot directly receive fixture function arguments as implementing that is likely to inflict on the ability to run general unittest.TestCase test suites. Maybe optional support would be possible, though. If unittest finally grows a plugin system that should help as well. In the meanwhile, the above ``usefixtures`` and ``autouse`` examples should help to mix in pytest fixtures into unittest suites. And of course you can also start to selectively leave away the ``unittest.TestCase`` subclassing, use plain asserts and get the unlimited pytest feature set. pytest-2.5.1/doc/en/_themes/0000775000175000017500000000000012254002202015217 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/_themes/LICENSE0000664000175000017500000000337512254002202016234 0ustar hpkhpk00000000000000Copyright (c) 2010 by Armin Ronacher. Some rights reserved. Redistribution and use in source and binary forms of the theme, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. We kindly ask you to only use these themes in an unmodified manner just for Flask and Flask-related products, not for unrelated projects. If you like the visual style and want to use it for your own projects, please consider making some larger changes to the themes (such as changing font faces, sizes, colors or margins). 
THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pytest-2.5.1/doc/en/_themes/flask/0000775000175000017500000000000012254002202016317 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/_themes/flask/relations.html0000664000175000017500000000111612254002202021204 0ustar hpkhpk00000000000000

Related Topics

pytest-2.5.1/doc/en/_themes/flask/theme.conf0000664000175000017500000000024412254002202020270 0ustar hpkhpk00000000000000[theme] inherit = basic stylesheet = flasky.css pygments_style = flask_theme_support.FlaskyStyle [options] index_logo = '' index_logo_height = 120px touch_icon = pytest-2.5.1/doc/en/_themes/flask/static/0000775000175000017500000000000012254002202017606 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/_themes/flask/static/flasky.css_t0000664000175000017500000002110512254002202022133 0ustar hpkhpk00000000000000/* * flasky.css_t * ~~~~~~~~~~~~ * * :copyright: Copyright 2010 by Armin Ronacher. * :license: Flask Design License, see LICENSE for details. */ {% set page_width = '940px' %} {% set sidebar_width = '220px' %} {% set base_font = '"Gudea", sans-serif' %} {% set header_font = '"Gudea", sans-serif' %} {% set link_color = '#490' %} {% set link_hover_color = '#9c0' %} @import url("basic.css"); /* -- page layout ----------------------------------------------------------- */ body { font-family: {{ base_font }}; font-size: 17px; background-color: white; color: #000; margin: 0; padding: 0; } div.document { width: {{ page_width }}; margin: 30px auto 0 auto; } div.documentwrapper { float: left; width: 100%; } div.bodywrapper { margin: 0 0 0 {{ sidebar_width }}; } div.sphinxsidebar { width: {{ sidebar_width }}; } hr { border: 0; border-top: 1px solid #B1B4B6; } div.body { background-color: #ffffff; color: #3E4349; padding: 0 30px 0 30px; } img.floatingflask { padding: 0 0 10px 10px; float: right; } div.footer { width: {{ page_width }}; margin: 20px auto 30px auto; font-size: 14px; color: #888; text-align: right; } div.footer a { color: #888; } div.related { display: none; } div.sphinxsidebar a { color: #444; text-decoration: none; border-bottom: 1px dotted #999; } div.sphinxsidebar a:hover { border-bottom: 1px solid #999; } div.sphinxsidebar { font-size: 14px; line-height: 1.5; } div.sphinxsidebarwrapper { padding: 18px 10px; } div.sphinxsidebarwrapper 
p.logo { padding: 0 0 20px 0; margin: 0; text-align: center; } div.sphinxsidebar h3, div.sphinxsidebar h4 { font-family: {{ header_font }}; color: #444; font-size: 24px; font-weight: normal; margin: 0 0 5px 0; padding: 0; } div.sphinxsidebar h4 { font-size: 20px; } div.sphinxsidebar h3 a { color: #444; } div.sphinxsidebar p.logo a, div.sphinxsidebar h3 a, div.sphinxsidebar p.logo a:hover, div.sphinxsidebar h3 a:hover { border: none; } div.sphinxsidebar p { color: #555; margin: 10px 0; } div.sphinxsidebar ul { margin: 10px 0; padding: 0; color: #000; } div.sphinxsidebar input { border: 1px solid #ccc; font-family: {{ base_font }}; font-size: 1em; } /* -- body styles ----------------------------------------------------------- */ a { color: {{ link_color }}; text-decoration: underline; } a:hover { color: {{ link_hover_color }}; text-decoration: underline; } a.reference.internal em { font-style: normal; } div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 { font-family: {{ header_font }}; font-weight: normal; margin: 30px 0px 10px 0px; padding: 0; } {% if theme_index_logo %} div.indexwrapper h1 { text-indent: -999999px; background: url({{ theme_index_logo }}) no-repeat center center; height: {{ theme_index_logo_height }}; } {% else %} div.indexwrapper div.body h1 { font-size: 200%; } {% endif %} div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } div.body h2 { font-size: 180%; } div.body h3 { font-size: 150%; } div.body h4 { font-size: 130%; } div.body h5 { font-size: 100%; } div.body h6 { font-size: 100%; } a.headerlink { color: #ddd; padding: 0 4px; text-decoration: none; } a.headerlink:hover { color: #444; background: #eaeaea; } div.body p, div.body dd, div.body li { line-height: 1.4em; } div.admonition { background: #fafafa; margin: 20px -30px; padding: 10px 30px; border-top: 1px solid #ccc; border-bottom: 1px solid #ccc; } div.admonition tt.xref, div.admonition a tt { border-bottom: 1px solid #fafafa; } dd div.admonition { 
margin-left: -60px; padding-left: 60px; } div.admonition p.admonition-title { font-family: {{ header_font }}; font-weight: normal; font-size: 24px; margin: 0 0 10px 0; padding: 0; line-height: 1; } div.admonition p.last { margin-bottom: 0; } div.highlight { background-color: white; } dt:target, .highlight { background: #FAF3E8; } div.note { background-color: #eee; border: 1px solid #ccc; } div.seealso { background-color: #ffc; border: 1px solid #ff6; } div.topic { background-color: #eee; } p.admonition-title { display: inline; } p.admonition-title:after { content: ":"; } pre, tt { font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.9em; } img.screenshot { } tt.descname, tt.descclassname { font-size: 0.95em; } tt.descname { padding-right: 0.08em; } img.screenshot { -moz-box-shadow: 2px 2px 4px #eee; -webkit-box-shadow: 2px 2px 4px #eee; box-shadow: 2px 2px 4px #eee; } table.docutils { border: 1px solid #888; -moz-box-shadow: 2px 2px 4px #eee; -webkit-box-shadow: 2px 2px 4px #eee; box-shadow: 2px 2px 4px #eee; } table.docutils td, table.docutils th { border: 1px solid #888; padding: 0.25em 0.7em; } table.field-list, table.footnote { border: none; -moz-box-shadow: none; -webkit-box-shadow: none; box-shadow: none; } table.footnote { margin: 15px 0; width: 100%; border: 1px solid #eee; background: #fdfdfd; font-size: 0.9em; } table.footnote + table.footnote { margin-top: -15px; border-top: none; } table.field-list th { padding: 0 0.8em 0 0; } table.field-list td { padding: 0; } table.footnote td.label { width: 0px; padding: 0.3em 0 0.3em 0.5em; } table.footnote td { padding: 0.3em 0.5em; } dl { margin: 0; padding: 0; } dl dd { margin-left: 30px; } blockquote { margin: 0 0 0 30px; padding: 0; } ul, ol { margin: 10px 0 10px 30px; padding: 0; } pre { background: #eee; padding: 7px 30px; margin: 15px -30px; line-height: 1.3em; } dl pre, blockquote pre, li pre { margin-left: -60px; padding-left: 60px; } dl dl pre { 
margin-left: -90px; padding-left: 90px; } tt { background-color: #ecf0f3; color: #222; /* padding: 1px 2px; */ } tt.xref, a tt { background-color: #FBFBFB; border-bottom: 1px solid white; } a.reference { text-decoration: none; border-bottom: 1px dotted {{ link_color }}; } a.reference:hover { border-bottom: 1px solid {{ link_hover_color }}; } a.footnote-reference { text-decoration: none; font-size: 0.7em; vertical-align: top; border-bottom: 1px dotted {{ link_color }}; } a.footnote-reference:hover { border-bottom: 1px solid {{ link_hover_color }}; } a:hover tt { background: #EEE; } @media screen and (max-width: 870px) { div.sphinxsidebar { display: none; } div.document { width: 100%; } div.documentwrapper { margin-left: 0; margin-top: 0; margin-right: 0; margin-bottom: 0; } div.bodywrapper { margin-top: 0; margin-right: 0; margin-bottom: 0; margin-left: 0; } ul { margin-left: 0; } .document { width: auto; } .footer { width: auto; } .bodywrapper { margin: 0; } .footer { width: auto; } .github { display: none; } } @media screen and (max-width: 875px) { body { margin: 0; padding: 20px 30px; } div.documentwrapper { float: none; background: white; } div.sphinxsidebar { display: block; float: none; width: 102.5%; margin: 50px -30px -20px -30px; padding: 10px 20px; background: #333; color: white; } div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, div.sphinxsidebar h3 a, div.sphinxsidebar ul { color: white; } div.sphinxsidebar a { color: #aaa; } div.sphinxsidebar p.logo { display: none; } div.document { width: 100%; margin: 0; } div.related { display: block; margin: 0; padding: 10px 0 20px 0; } div.related ul, div.related ul li { margin: 0; padding: 0; } div.footer { display: none; } div.bodywrapper { margin: 0; } div.body { min-height: 0; padding: 0; } .rtd_doc_footer { display: none; } .document { width: auto; } .footer { width: auto; } .footer { width: auto; } .github { display: none; } } /* misc. 
*/ .revsys-inline { display: none!important; } pytest-2.5.1/doc/en/_themes/flask/layout.html0000664000175000017500000000143112254002202020521 0ustar hpkhpk00000000000000{%- extends "basic/layout.html" %} {%- block extrahead %} {{ super() }} {% if theme_touch_icon %} {% endif %} {% endblock %} {%- block relbar2 %}{% endblock %} {% block header %} {{ super() }} {% if pagename == 'index' %}
{% endif %} {% endblock %} {%- block footer %} {% if pagename == 'index' %}
{% endif %} {%- endblock %} pytest-2.5.1/doc/en/_themes/flask_theme_support.py0000664000175000017500000001141312254002202021647 0ustar hpkhpk00000000000000# flasky extensions. flasky pygments style based on tango style from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal class FlaskyStyle(Style): background_color = "#f8f8f8" default_style = "" styles = { # No corresponding class for the following: #Text: "", # class: '' Whitespace: "underline #f8f8f8", # class: 'w' Error: "#a40000 border:#ef2929", # class: 'err' Other: "#000000", # class 'x' Comment: "italic #8f5902", # class: 'c' Comment.Preproc: "noitalic", # class: 'cp' Keyword: "bold #004461", # class: 'k' Keyword.Constant: "bold #004461", # class: 'kc' Keyword.Declaration: "bold #004461", # class: 'kd' Keyword.Namespace: "bold #004461", # class: 'kn' Keyword.Pseudo: "bold #004461", # class: 'kp' Keyword.Reserved: "bold #004461", # class: 'kr' Keyword.Type: "bold #004461", # class: 'kt' Operator: "#582800", # class: 'o' Operator.Word: "bold #004461", # class: 'ow' - like keywords Punctuation: "bold #000000", # class: 'p' # because special names such as Name.Class, Name.Function, etc. # are not recognized as such later in the parsing, we choose them # to look the same as ordinary variables. 
Name: "#000000", # class: 'n' Name.Attribute: "#c4a000", # class: 'na' - to be revised Name.Builtin: "#004461", # class: 'nb' Name.Builtin.Pseudo: "#3465a4", # class: 'bp' Name.Class: "#000000", # class: 'nc' - to be revised Name.Constant: "#000000", # class: 'no' - to be revised Name.Decorator: "#888", # class: 'nd' - to be revised Name.Entity: "#ce5c00", # class: 'ni' Name.Exception: "bold #cc0000", # class: 'ne' Name.Function: "#000000", # class: 'nf' Name.Property: "#000000", # class: 'py' Name.Label: "#f57900", # class: 'nl' Name.Namespace: "#000000", # class: 'nn' - to be revised Name.Other: "#000000", # class: 'nx' Name.Tag: "bold #004461", # class: 'nt' - like a keyword Name.Variable: "#000000", # class: 'nv' - to be revised Name.Variable.Class: "#000000", # class: 'vc' - to be revised Name.Variable.Global: "#000000", # class: 'vg' - to be revised Name.Variable.Instance: "#000000", # class: 'vi' - to be revised Number: "#990000", # class: 'm' Literal: "#000000", # class: 'l' Literal.Date: "#000000", # class: 'ld' String: "#4e9a06", # class: 's' String.Backtick: "#4e9a06", # class: 'sb' String.Char: "#4e9a06", # class: 'sc' String.Doc: "italic #8f5902", # class: 'sd' - like a comment String.Double: "#4e9a06", # class: 's2' String.Escape: "#4e9a06", # class: 'se' String.Heredoc: "#4e9a06", # class: 'sh' String.Interpol: "#4e9a06", # class: 'si' String.Other: "#4e9a06", # class: 'sx' String.Regex: "#4e9a06", # class: 'sr' String.Single: "#4e9a06", # class: 's1' String.Symbol: "#4e9a06", # class: 'ss' Generic: "#000000", # class: 'g' Generic.Deleted: "#a40000", # class: 'gd' Generic.Emph: "italic #000000", # class: 'ge' Generic.Error: "#ef2929", # class: 'gr' Generic.Heading: "bold #000080", # class: 'gh' Generic.Inserted: "#00A000", # class: 'gi' Generic.Output: "#888", # class: 'go' Generic.Prompt: "#745334", # class: 'gp' Generic.Strong: "bold #000000", # class: 'gs' Generic.Subheading: "bold #800080", # class: 'gu' Generic.Traceback: "bold #a40000", # 
class: 'gt' } pytest-2.5.1/doc/en/_themes/.gitignore0000664000175000017500000000002612254002202017205 0ustar hpkhpk00000000000000*.pyc *.pyo .DS_Store pytest-2.5.1/doc/en/_themes/README0000664000175000017500000000210512254002202016075 0ustar hpkhpk00000000000000Flask Sphinx Styles =================== This repository contains sphinx styles for Flask and Flask related projects. To use this style in your Sphinx documentation, follow this guide: 1. put this folder as _themes into your docs folder. Alternatively you can also use git submodules to check out the contents there. 2. add this to your conf.py: sys.path.append(os.path.abspath('_themes')) html_theme_path = ['_themes'] html_theme = 'flask' The following themes exist: - 'flask' - the standard flask documentation theme for large projects - 'flask_small' - small one-page theme. Intended to be used by very small addon libraries for flask. The following options exist for the flask_small theme: [options] index_logo = '' filename of a picture in _static to be used as replacement for the h1 in the index.rst file. index_logo_height = 120px height of the index logo github_fork = '' repository name on github for the "fork me" badge pytest-2.5.1/doc/en/talks.txt0000664000175000017500000000776612254002202015472 0ustar hpkhpk00000000000000 Talks and Tutorials ========================== .. _`funcargs`: funcargs.html Tutorial examples and blog postings --------------------------------------------- .. _`tutorial1 repository`: http://bitbucket.org/hpk42/pytest-tutorial1/ .. 
_`pycon 2010 tutorial PDF`: http://bitbucket.org/hpk42/pytest-tutorial1/raw/tip/pytest-basic.pdf Basic usage and fixtures: - `pytest feature and release highlights (GERMAN, October 2013) `_ - `pytest introduction from Brian Okken (January 2013) `_ - `pycon australia 2012 pytest talk from Brianna Laugher `_ (`video `_, `slides `_, `code `_) - `pycon 2012 US talk video from Holger Krekel `_ - `pycon 2010 tutorial PDF`_ and `tutorial1 repository`_ Fixtures and Function arguments: - :ref:`fixtures` - `monkey patching done right`_ (blog post, consult `monkeypatch plugin`_ for up-to-date API) Test parametrization: - `generating parametrized tests with funcargs`_ (uses deprecated ``addcall()`` API. - `test generators and cached setup`_ - `parametrizing tests, generalized`_ (blog post) - `putting test-hooks into local or global plugins`_ (blog post) Assertion introspection: - `(07/2011) Behind the scenes of py.test's new assertion rewriting `_ Distributed testing: - `simultaneously test your code on all platforms`_ (blog entry) Plugin specific examples: - `skipping slow tests by default in py.test`_ (blog entry) - `many examples in the docs for plugins`_ .. _`skipping slow tests by default in py.test`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html .. _`many examples in the docs for plugins`: plugin/index.html .. _`monkeypatch plugin`: plugin/monkeypatch.html .. _`application setup in test functions with funcargs`: funcargs.html#appsetup .. _`simultaneously test your code on all platforms`: http://tetamap.wordpress.com/2009/03/23/new-simultanously-test-your-code-on-all-platforms/ .. _`monkey patching done right`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/ .. _`putting test-hooks into local or global plugins`: http://tetamap.wordpress.com/2009/05/14/putting-test-hooks-into-local-and-global-plugins/ .. 
_`parametrizing tests, generalized`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/ .. _`generating parametrized tests with funcargs`: funcargs.html#test-generators .. _`test generators and cached setup`: http://bruynooghe.blogspot.com/2010/06/pytest-test-generators-and-cached-setup.html Older conference talks and tutorials ---------------------------------------- - `ep2009-rapidtesting.pdf`_ tutorial slides (July 2009): - testing terminology - basic py.test usage, file system layout - test function arguments (funcargs_) and test fixtures - existing plugins - distributed testing - `ep2009-pytest.pdf`_ 60 minute py.test talk, highlighting unique features and a roadmap (July 2009) - `pycon2009-pytest-introduction.zip`_ slides and files, extended version of py.test basic introduction, discusses more options, also introduces old-style xUnit setup, looponfailing and other features. - `pycon2009-pytest-advanced.pdf`_ contain a slightly older version of funcargs and distributed testing, compared to the EuroPython 2009 slides. .. _`ep2009-rapidtesting.pdf`: http://codespeak.net/download/py/ep2009-rapidtesting.pdf .. _`ep2009-pytest.pdf`: http://codespeak.net/download/py/ep2009-pytest.pdf .. _`pycon2009-pytest-introduction.zip`: http://codespeak.net/download/py/pycon2009-pytest-introduction.zip .. 
_`pycon2009-pytest-advanced.pdf`: http://codespeak.net/download/py/pycon2009-pytest-advanced.pdf pytest-2.5.1/doc/en/check_sphinx.py0000664000175000017500000000072712254002202016621 0ustar hpkhpk00000000000000import py import subprocess def test_build_docs(tmpdir): doctrees = tmpdir.join("doctrees") htmldir = tmpdir.join("html") subprocess.check_call([ "sphinx-build", "-W", "-bhtml", "-d", str(doctrees), ".", str(htmldir)]) def test_linkcheck(tmpdir): doctrees = tmpdir.join("doctrees") htmldir = tmpdir.join("html") subprocess.check_call( ["sphinx-build", "-blinkcheck", "-d", str(doctrees), ".", str(htmldir)]) pytest-2.5.1/doc/en/example/0000775000175000017500000000000012254002202015226 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/example/attic.txt0000664000175000017500000000516012254002202017075 0ustar hpkhpk00000000000000 .. _`accept example`: example: specifying and selecting acceptance tests -------------------------------------------------------------- .. sourcecode:: python # ./conftest.py def pytest_option(parser): group = parser.getgroup("myproject") group.addoption("-A", dest="acceptance", action="store_true", help="run (slow) acceptance tests") def pytest_funcarg__accept(request): return AcceptFixture(request) class AcceptFixture: def __init__(self, request): if not request.config.option.acceptance: pytest.skip("specify -A to run acceptance tests") self.tmpdir = request.config.mktemp(request.function.__name__, numbered=True) def run(self, cmd): """ called by test code to execute an acceptance test. """ self.tmpdir.chdir() return py.process.cmdexec(cmd) and the actual test function example: .. sourcecode:: python def test_some_acceptance_aspect(accept): accept.tmpdir.mkdir("somesub") result = accept.run("ls -la") assert "somesub" in result If you run this test without specifying a command line option the test will get skipped with an appropriate message. 
Otherwise you can start to add convenience and test support methods to your AcceptFixture and drive running of tools or applications and provide ways to do assertions about the output. .. _`decorate a funcarg`: example: decorating a funcarg in a test module -------------------------------------------------------------- For larger scale setups it's sometimes useful to decorate a funcarg just for a particular test module. We can extend the `accept example`_ by putting this in our test module: .. sourcecode:: python def pytest_funcarg__accept(request): # call the next factory (living in our conftest.py) arg = request.getfuncargvalue("accept") # create a special layout in our tempdir arg.tmpdir.mkdir("special") return arg class TestSpecialAcceptance: def test_sometest(self, accept): assert accept.tmpdir.join("special").check() Our module level factory will be invoked first and it can ask its request object to call the next factory and then decorate its result. This mechanism allows us to stay ignorant of how/where the function argument is provided - in our example from a `conftest plugin`_. sidenote: the temporary directory used here are instances of the `py.path.local`_ class which provides many of the os.path methods in a convenient way. .. _`py.path.local`: ../path.html#local .. _`conftest plugin`: customize.html#conftestplugin pytest-2.5.1/doc/en/example/index.txt0000664000175000017500000000173412254002202017103 0ustar hpkhpk00000000000000 .. _examples: Usages and Examples =========================================== Here is a (growing) list of examples. :ref:`Contact ` us if you need more examples or have questions. Also take a look at the :ref:`comprehensive documentation ` which contains many example snippets as well. Also, `pytest on stackoverflow.com `_ often comes with example answers. 
For basic examples, see - :doc:`../getting-started` for basic introductory examples - :ref:`assert` for basic assertion examples - :ref:`fixtures` for basic fixture/setup examples - :ref:`parametrize` for basic test function parametrization - :doc:`../unittest` for basic unittest integration - :doc:`../nose` for basic nosetests integration The following examples aim at various use cases you might encounter. .. toctree:: :maxdepth: 2 reportingdemo.txt simple.txt parametrize.txt markers.txt special.txt pythoncollection.txt nonpython.txt pytest-2.5.1/doc/en/example/special.txt0000664000175000017500000000367012254002202017415 0ustar hpkhpk00000000000000 A sesssion-fixture which can look at all collected tests ---------------------------------------------------------------- A session-scoped fixture effectively has access to all collected test items. Here is an example of a fixture function which walks all collected tests and looks if their test class defines a ``callme`` method and calls it:: # content of conftest.py import pytest @pytest.fixture(scope="session", autouse=True) def callattr_ahead_of_alltests(request): print "callattr_ahead_of_alltests called" seen = set([None]) session = request.node for item in session.items: cls = item.getparent(pytest.Class) if cls not in seen: if hasattr(cls.obj, "callme"): cls.obj.callme() seen.add(cls) test classes may now define a ``callme`` method which will be called ahead of running any tests:: # content of test_module.py class TestHello: @classmethod def callme(cls): print "callme called!" def test_method1(self): print "test_method1 called" def test_method2(self): print "test_method1 called" class TestOther: @classmethod def callme(cls): print "callme other called" def test_other(self): print "test other" # works with unittest as well ... 
import unittest class SomeTest(unittest.TestCase): @classmethod def callme(self): print "SomeTest callme called" def test_unit1(self): print "test_unit1 method called" If you run this without output capturing:: $ py.test -q -s test_module.py callattr_ahead_of_alltests called callme called! callme other called SomeTest callme called test_method1 called .test_method1 called .test other .test_unit1 method called . 4 passed in 0.01 seconds pytest-2.5.1/doc/en/example/costlysetup/0000775000175000017500000000000012254002202017624 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/example/costlysetup/conftest.py0000664000175000017500000000054412254002202022026 0ustar hpkhpk00000000000000 import pytest @pytest.fixture("session") def setup(request): setup = CostlySetup() request.addfinalizer(setup.finalize) return setup class CostlySetup: def __init__(self): import time print ("performing costly setup") time.sleep(5) self.timecostly = 1 def finalize(self): del self.timecostly pytest-2.5.1/doc/en/example/costlysetup/sub1/0000775000175000017500000000000012254002202020476 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/example/costlysetup/sub1/test_quick.py0000664000175000017500000000004112254002202023216 0ustar hpkhpk00000000000000 def test_quick(setup): pass pytest-2.5.1/doc/en/example/costlysetup/sub1/__init__.py0000664000175000017500000000000212254002202022577 0ustar hpkhpk00000000000000# pytest-2.5.1/doc/en/example/costlysetup/sub2/0000775000175000017500000000000012254002202020477 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/example/costlysetup/sub2/__init__.py0000664000175000017500000000000212254002202022600 0ustar hpkhpk00000000000000# pytest-2.5.1/doc/en/example/costlysetup/sub2/test_two.py0000664000175000017500000000017712254002202022726 0ustar hpkhpk00000000000000def test_something(setup): assert setup.timecostly == 1 def test_something_more(setup): assert setup.timecostly == 1 pytest-2.5.1/doc/en/example/parametrize.txt0000664000175000017500000002757712254002202020334 
0ustar hpkhpk00000000000000 .. _paramexamples: Parametrizing tests ================================================= .. currentmodule:: _pytest.python py.test allows to easily parametrize test functions. For basic docs, see :ref:`parametrize-basics`. In the following we provide some examples using the builtin mechanisms. Generating parameters combinations, depending on command line ---------------------------------------------------------------------------- .. regendoc:wipe Let's say we want to execute a test with different computation parameters and the parameter range shall be determined by a command line argument. Let's first write a simple (do-nothing) computation test:: # content of test_compute.py def test_compute(param1): assert param1 < 4 Now we add a test configuration like this:: # content of conftest.py def pytest_addoption(parser): parser.addoption("--all", action="store_true", help="run all combinations") def pytest_generate_tests(metafunc): if 'param1' in metafunc.fixturenames: if metafunc.config.option.all: end = 5 else: end = 2 metafunc.parametrize("param1", range(end)) This means that we only run 2 tests if we do not pass ``--all``:: $ py.test -q test_compute.py .. 2 passed in 0.01 seconds We run only two computations, so we see two dots. let's run the full monty:: $ py.test -q --all ....F ================================= FAILURES ================================= _____________________________ test_compute[4] ______________________________ param1 = 4 def test_compute(param1): > assert param1 < 4 E assert 4 < 4 test_compute.py:3: AssertionError 1 failed, 4 passed in 0.01 seconds As expected when running the full range of ``param1`` values we'll get an error on the last one. A quick port of "testscenarios" ------------------------------------ .. _`test scenarios`: http://pypi.python.org/pypi/testscenarios/ Here is a quick port to run tests configured with `test scenarios`_, an add-on from Robert Collins for the standard unittest framework. 
We only have to work a bit to construct the correct arguments for pytest's :py:func:`Metafunc.parametrize`:: # content of test_scenarios.py def pytest_generate_tests(metafunc): idlist = [] argvalues = [] for scenario in metafunc.cls.scenarios: idlist.append(scenario[0]) items = scenario[1].items() argnames = [x[0] for x in items] argvalues.append(([x[1] for x in items])) metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class") scenario1 = ('basic', {'attribute': 'value'}) scenario2 = ('advanced', {'attribute': 'value2'}) class TestSampleWithScenarios: scenarios = [scenario1, scenario2] def test_demo1(self, attribute): assert isinstance(attribute, str) def test_demo2(self, attribute): assert isinstance(attribute, str) this is a fully self-contained example which you can run with:: $ py.test test_scenarios.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 4 items test_scenarios.py .... ========================= 4 passed in 0.01 seconds ========================= If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: $ py.test --collect-only test_scenarios.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 4 items ============================= in 0.01 seconds ============================= Note that we told ``metafunc.parametrize()`` that your scenario values should be considered class-scoped. With pytest-2.3 this leads to a resource-based ordering. Deferring the setup of parametrized resources --------------------------------------------------- .. regendoc:wipe The parametrization of test functions happens at collection time. It is a good idea to setup expensive resources like DB connections or subprocess only when the actual test is run. 
Here is a simple example how you can achieve that, first the actual test requiring a ``db`` object:: # content of test_backends.py import pytest def test_db_initialized(db): # a dummy test if db.__class__.__name__ == "DB2": pytest.fail("deliberately failing for demo purposes") We can now add a test configuration that generates two invocations of the ``test_db_initialized`` function and also implements a factory that creates a database object for the actual test invocations:: # content of conftest.py import pytest def pytest_generate_tests(metafunc): if 'db' in metafunc.fixturenames: metafunc.parametrize("db", ['d1', 'd2'], indirect=True) class DB1: "one database object" class DB2: "alternative database object" @pytest.fixture def db(request): if request.param == "d1": return DB1() elif request.param == "d2": return DB2() else: raise ValueError("invalid internal test config") Let's first see how it looks like at collection time:: $ py.test test_backends.py --collect-only =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 2 items ============================= in 0.00 seconds ============================= And then when we run the test:: $ py.test -q test_backends.py .F ================================= FAILURES ================================= _________________________ test_db_initialized[d2] __________________________ db = def test_db_initialized(db): # a dummy test if db.__class__.__name__ == "DB2": > pytest.fail("deliberately failing for demo purposes") E Failed: deliberately failing for demo purposes test_backends.py:6: Failed 1 failed, 1 passed in 0.01 seconds The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase. .. 
regendoc:wipe Parametrizing test methods through per-class configuration -------------------------------------------------------------- .. _`unittest parameterizer`: http://code.google.com/p/unittest-ext/source/browse/trunk/params.py Here is an example ``pytest_generate_function`` function implementing a parametrization scheme similar to Michael Foord's `unittest parameterizer`_ but in a lot less code:: # content of ./test_parametrize.py import pytest def pytest_generate_tests(metafunc): # called once per each test function funcarglist = metafunc.cls.params[metafunc.function.__name__] argnames = list(funcarglist[0]) metafunc.parametrize(argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]) class TestClass: # a map specifying multiple argument sets for a test method params = { 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ], 'test_zerodivision': [dict(a=1, b=0), ], } def test_equals(self, a, b): assert a == b def test_zerodivision(self, a, b): pytest.raises(ZeroDivisionError, "a/b") Our test generator looks up a class-level definition which specifies which argument sets to use for each test function. Let's run it:: $ py.test -q F.. ================================= FAILURES ================================= ________________________ TestClass.test_equals[2-1] ________________________ self = , a = 1, b = 2 def test_equals(self, a, b): > assert a == b E assert 1 == 2 test_parametrize.py:18: AssertionError 1 failed, 2 passed in 0.01 seconds Indirect parametrization with multiple fixtures -------------------------------------------------------------- Here is a stripped down real-life example of using parametrized testing for testing serialization of objects between different python interpreters. 
We define a ``test_basic_objects`` function which is to be run with different sets of arguments for its three arguments: * ``python1``: first python interpreter, run to pickle-dump an object to a file * ``python2``: second interpreter, run to pickle-load an object from a file * ``obj``: object to be dumped/loaded .. literalinclude:: multipython.py Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize):: . $ py.test -rs -q multipython.py ............sss............sss............sss............ssssssssssssssssss ========================= short test summary info ========================== SKIP [27] /home/hpk/p/pytest/doc/en/example/multipython.py:21: 'python2.8' not found 48 passed, 27 skipped in 1.34 seconds Indirect parametrization of optional implementations/imports -------------------------------------------------------------------- If you want to compare the outcomes of several implementations of a given API, you can write test functions that receive the already imported implementations and get skipped in case the implementation is not importable/available. 
Let's say we have a "base" implementation and the other (possibly optimized ones) need to provide similar results:: # content of conftest.py import pytest @pytest.fixture(scope="session") def basemod(request): return pytest.importorskip("base") @pytest.fixture(scope="session", params=["opt1", "opt2"]) def optmod(request): return pytest.importorskip(request.param) And then a base implementation of a simple function:: # content of base.py def func1(): return 1 And an optimized version:: # content of opt1.py def func1(): return 1.0001 And finally a little test module:: # content of test_module.py def test_func1(basemod, optmod): assert round(basemod.func1(), 3) == round(optmod.func1(), 3) If you run this with reporting for skips enabled:: $ py.test -rs test_module.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 2 items test_module.py .s ========================= short test summary info ========================== SKIP [1] /tmp/doc-exec-65/conftest.py:10: could not import 'opt2' =================== 1 passed, 1 skipped in 0.01 seconds ==================== You'll see that we don't have a ``opt2`` module and thus the second test run of our ``test_func1`` was skipped. A few notes: - the fixture functions in the ``conftest.py`` file are "session-scoped" because we don't need to import more than once - if you have multiple test functions and a skipped import, you will see the ``[1]`` count increasing in the report - you can put :ref:`@pytest.mark.parametrize <@pytest.mark.parametrize>` style parametrization on the test functions to parametrize input/output values as well. pytest-2.5.1/doc/en/example/reportingdemo.txt0000664000175000017500000004636512254002202020663 0ustar hpkhpk00000000000000 .. 
_`tbreportdemo`: Demo of Python failure reports with py.test ================================================== Here is a nice run of several tens of failures and how py.test presents things (unfortunately not showing the nice colors here in the HTML that you get on the terminal - we are working on that): .. code-block:: python assertion $ py.test failure_demo.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 39 items failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ================================= FAILURES ================================= ____________________________ test_generative[0] ____________________________ param1 = 3, param2 = 6 def test_generative(param1, param2): > assert param1 * 2 < param2 E assert (3 * 2) < 6 failure_demo.py:15: AssertionError _________________________ TestFailing.test_simple __________________________ self = def test_simple(self): def f(): return 42 def g(): return 43 > assert f() == g() E assert 42 == 43 E + where 42 = () E + and 43 = () failure_demo.py:28: AssertionError ____________________ TestFailing.test_simple_multiline _____________________ self = def test_simple_multiline(self): otherfunc_multi( 42, > 6*9) failure_demo.py:33: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 42, b = 54 def otherfunc_multi(a,b): > assert (a == b) E assert 42 == 54 failure_demo.py:11: AssertionError ___________________________ TestFailing.test_not ___________________________ self = def test_not(self): def f(): return 42 > assert not f() E assert not 42 E + where 42 = () failure_demo.py:38: AssertionError _________________ TestSpecialisedExplanations.test_eq_text _________________ self = def test_eq_text(self): > assert 'spam' == 'eggs' E assert 'spam' == 'eggs' E - spam E + eggs failure_demo.py:42: AssertionError _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ self = def 
test_eq_similar_text(self): > assert 'foo 1 bar' == 'foo 2 bar' E assert 'foo 1 bar' == 'foo 2 bar' E - foo 1 bar E ? ^ E + foo 2 bar E ? ^ failure_demo.py:45: AssertionError ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ self = def test_eq_multiline_text(self): > assert 'foo\nspam\nbar' == 'foo\neggs\nbar' E assert 'foo\nspam\nbar' == 'foo\neggs\nbar' E foo E - spam E + eggs E bar failure_demo.py:48: AssertionError ______________ TestSpecialisedExplanations.test_eq_long_text _______________ self = def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 b = '1'*100 + 'b' + '2'*100 > assert a == b E assert '111111111111...2222222222222' == '1111111111111...2222222222222' E Skipping 90 identical leading characters in diff, use -v to show E Skipping 91 identical trailing characters in diff, use -v to show E - 1111111111a222222222 E ? ^ E + 1111111111b222222222 E ? ^ failure_demo.py:53: AssertionError _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ self = def test_eq_long_text_multiline(self): a = '1\n'*100 + 'a' + '2\n'*100 b = '1\n'*100 + 'b' + '2\n'*100 > assert a == b E assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n' E Skipping 190 identical leading characters in diff, use -v to show E Skipping 191 identical trailing characters in diff, use -v to show E 1 E 1 E 1 E 1 E 1 E - a2 E + b2 E 2 E 2 E 2 E 2 failure_demo.py:58: AssertionError _________________ TestSpecialisedExplanations.test_eq_list _________________ self = def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] E assert [0, 1, 2] == [0, 1, 3] E At index 2 diff: 2 != 3 failure_demo.py:61: AssertionError ______________ TestSpecialisedExplanations.test_eq_list_long _______________ self = def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 b = [0]*100 + [2] + [3]*100 > assert a == b E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...] 
E At index 100 diff: 1 != 2 failure_demo.py:66: AssertionError _________________ TestSpecialisedExplanations.test_eq_dict _________________ self = def test_eq_dict(self): > assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} E assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} E Omitting 1 identical items, use -v to show E Differing items: E {'b': 1} != {'b': 2} E Left contains more items: E {'c': 0} E Right contains more items: E {'d': 0} failure_demo.py:69: AssertionError _________________ TestSpecialisedExplanations.test_eq_set __________________ self = def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) E assert set([0, 10, 11, 12]) == set([0, 20, 21]) E Extra items in the left set: E 10 E 11 E 12 E Extra items in the right set: E 20 E 21 failure_demo.py:72: AssertionError _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ self = def test_eq_longer_list(self): > assert [1,2] == [1,2,3] E assert [1, 2] == [1, 2, 3] E Right contains more items, first extra item: 3 failure_demo.py:75: AssertionError _________________ TestSpecialisedExplanations.test_in_list _________________ self = def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] E assert 1 in [0, 2, 3, 4, 5] failure_demo.py:78: AssertionError __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ self = def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' > assert 'foo' not in text E assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail' E 'foo' is contained here: E some multiline E text E which E includes foo E ? +++ E and a E tail failure_demo.py:82: AssertionError ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ self = def test_not_in_text_single(self): text = 'single foo line' > assert 'foo' not in text E assert 'foo' not in 'single foo line' E 'foo' is contained here: E single foo line E ? 
+++ failure_demo.py:86: AssertionError _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ self = def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 > assert 'foo' not in text E assert 'foo' not in 'head head head head hea...ail tail tail tail tail ' E 'foo' is contained here: E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? +++ failure_demo.py:90: AssertionError ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ self = def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 > assert 'f'*70 not in text E assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail ' E 'ffffffffffffffffff...fffffffffffffffffff' is contained here: E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ failure_demo.py:94: AssertionError ______________________________ test_attribute ______________________________ def test_attribute(): class Foo(object): b = 1 i = Foo() > assert i.b == 2 E assert 1 == 2 E + where 1 = .b failure_demo.py:101: AssertionError _________________________ test_attribute_instance __________________________ def test_attribute_instance(): class Foo(object): b = 1 > assert Foo().b == 2 E assert 1 == 2 E + where 1 = .b E + where = () failure_demo.py:107: AssertionError __________________________ test_attribute_failure __________________________ def test_attribute_failure(): class Foo(object): def _get_b(self): raise Exception('Failed to get attrib') b = property(_get_b) i = Foo() > assert i.b == 2 failure_demo.py:116: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = def _get_b(self): > raise Exception('Failed to get attrib') E Exception: Failed to get attrib failure_demo.py:113: Exception _________________________ test_attribute_multiple __________________________ def test_attribute_multiple(): class Foo(object): b = 1 class Bar(object): b = 2 > assert Foo().b == Bar().b E assert 1 == 2 E + where 1 = .b E + where = () E + and 2 = .b E + where = () failure_demo.py:124: AssertionError __________________________ TestRaises.test_raises __________________________ self = def test_raises(self): s = 'qwe' > raises(TypeError, "int(s)") failure_demo.py:133: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' <0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:983>:1: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ self = def test_raises_doesnt(self): > raises(IOError, "int('3')") E Failed: DID NOT RAISE failure_demo.py:136: Failed __________________________ TestRaises.test_raise 
___________________________ self = def test_raise(self): > raise ValueError("demo error") E ValueError: demo error failure_demo.py:139: ValueError ________________________ TestRaises.test_tupleerror ________________________ self = def test_tupleerror(self): > a,b = [1] E ValueError: need more than 1 value to unpack failure_demo.py:142: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ self = def test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] print ("l is %r" % l) > a,b = l.pop() E TypeError: 'int' object is not iterable failure_demo.py:147: TypeError ----------------------------- Captured stdout ------------------------------ l is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ self = def test_some_error(self): > if namenotexi: E NameError: global name 'namenotexi' is not defined failure_demo.py:150: NameError ____________________ test_dynamic_compile_shows_nicely _____________________ def test_dynamic_compile_shows_nicely(): src = 'def foo():\n assert 1 == 0\n' name = 'abc-123' module = py.std.imp.new_module(name) code = py.code.compile(src, name, 'exec') py.builtin.exec_(code, module.__dict__) py.std.sys.modules[name] = module > module.foo() failure_demo.py:165: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def foo(): > assert 1 == 0 E assert 1 == 0 <2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ self = def test_complex_error(self): def f(): return 44 def g(): return 43 > somefunc(f(), g()) failure_demo.py:175: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ x = 44, y = 43 def somefunc(x,y): > otherfunc(x,y) failure_demo.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 44, b = 43 def otherfunc(a,b): > assert a==b E assert 44 == 43 
failure_demo.py:5: AssertionError ___________________ TestMoreErrors.test_z1_unpack_error ____________________ self = def test_z1_unpack_error(self): l = [] > a,b = l E ValueError: need more than 0 values to unpack failure_demo.py:179: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ self = def test_z2_type_error(self): l = 3 > a,b = l E TypeError: 'int' object is not iterable failure_demo.py:183: TypeError ______________________ TestMoreErrors.test_startswith ______________________ self = def test_startswith(self): s = "123" g = "456" > assert s.startswith(g) E assert ('456') E + where = '123'.startswith failure_demo.py:188: AssertionError __________________ TestMoreErrors.test_startswith_nested ___________________ self = def test_startswith_nested(self): def f(): return "123" def g(): return "456" > assert f().startswith(g()) E assert ('456') E + where = '123'.startswith E + where '123' = () E + and '456' = () failure_demo.py:195: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ self = def test_global_func(self): > assert isinstance(globf(42), float) E assert isinstance(43, float) E + where 43 = globf(42) failure_demo.py:198: AssertionError _______________________ TestMoreErrors.test_instance _______________________ self = def test_instance(self): self.x = 6*7 > assert self.x != 42 E assert 42 != 42 E + where 42 = .x failure_demo.py:202: AssertionError _______________________ TestMoreErrors.test_compare ________________________ self = def test_compare(self): > assert globf(10) < 5 E assert 11 < 5 E + where 11 = globf(10) failure_demo.py:205: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ self = def test_try_finally(self): x = 1 try: > assert x == 0 E assert 1 == 0 failure_demo.py:210: AssertionError ======================== 39 failed in 0.20 seconds ========================= 
pytest-2.5.1/doc/en/example/xfail_demo.py0000664000175000017500000000056212254002202017712 0ustar hpkhpk00000000000000import pytest xfail = pytest.mark.xfail @xfail def test_hello(): assert 0 @xfail(run=False) def test_hello2(): assert 0 @xfail("hasattr(os, 'sep')") def test_hello3(): assert 0 @xfail(reason="bug 110") def test_hello4(): assert 0 @xfail('pytest.__version__[0] != "17"') def test_hello5(): assert 0 def test_hello6(): pytest.xfail("reason") pytest-2.5.1/doc/en/example/layout1/0000775000175000017500000000000012254002202016624 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/example/layout1/setup.cfg0000664000175000017500000000015212254002202020443 0ustar hpkhpk00000000000000[pytest] testfilepatterns = ${topdir}/tests/unit/test_${basename} ${topdir}/tests/functional/*.py pytest-2.5.1/doc/en/example/conftest.py0000664000175000017500000000003712254002202017425 0ustar hpkhpk00000000000000collect_ignore = ["nonpython"] pytest-2.5.1/doc/en/example/py2py3/0000775000175000017500000000000012254002202016374 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/example/py2py3/test_py2.py0000664000175000017500000000014212254002202020514 0ustar hpkhpk00000000000000 def test_exception_syntax(): try: 0/0 except ZeroDivisionError, e: pass pytest-2.5.1/doc/en/example/py2py3/conftest.py0000664000175000017500000000050412254002202020572 0ustar hpkhpk00000000000000import sys import pytest py3 = sys.version_info[0] >= 3 class DummyCollector(pytest.collect.File): def collect(self): return [] def pytest_pycollect_makemodule(path, parent): bn = path.basename if "py3" in bn and not py3 or ("py2" in bn and py3): return DummyCollector(path, parent=parent) pytest-2.5.1/doc/en/example/py2py3/test_py3.py0000664000175000017500000000014412254002202020517 0ustar hpkhpk00000000000000 def test_exception_syntax(): try: 0/0 except ZeroDivisionError as e: pass pytest-2.5.1/doc/en/example/markers.txt0000664000175000017500000004467112254002202017447 0ustar hpkhpk00000000000000 .. 
_`mark examples`: Working with custom markers ================================================= Here are some example using the :ref:`mark` mechanism. Marking test functions and selecting them for a run ---------------------------------------------------- You can "mark" a test function with custom metadata like this:: # content of test_server.py import pytest @pytest.mark.webtest def test_send_http(): pass # perform some webtest test for your app def test_something_quick(): pass def test_another(): pass .. versionadded:: 2.2 You can then restrict a test run to only run tests marked with ``webtest``:: $ py.test -v -m webtest =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 3 items test_server.py:3: test_send_http PASSED =================== 2 tests deselected by "-m 'webtest'" =================== ================== 1 passed, 2 deselected in 0.01 seconds ================== Or the inverse, running all tests except the webtest ones:: $ py.test -v -m "not webtest" =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 3 items test_server.py:6: test_something_quick PASSED test_server.py:8: test_another PASSED ================= 1 tests deselected by "-m 'not webtest'" ================= ================== 2 passed, 1 deselected in 0.01 seconds ================== Using ``-k expr`` to select tests based on their name ------------------------------------------------------- .. versionadded: 2.0/2.3.4 You can use the ``-k`` command line option to specify an expression which implements a substring match on the test names instead of the exact match on markers that ``-m`` provides. 
This makes it easy to select tests based on their names:: $ py.test -v -k http # running with the above defined example module =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 3 items test_server.py:3: test_send_http PASSED ====================== 2 tests deselected by '-khttp' ====================== ================== 1 passed, 2 deselected in 0.01 seconds ================== And you can also run all tests except the ones that match the keyword:: $ py.test -k "not send_http" -v =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 3 items test_server.py:6: test_something_quick PASSED test_server.py:8: test_another PASSED ================= 1 tests deselected by '-knot send_http' ================== ================== 2 passed, 1 deselected in 0.01 seconds ================== Or to select "http" and "quick" tests:: $ py.test -k "http or quick" -v =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 3 items test_server.py:3: test_send_http PASSED test_server.py:6: test_something_quick PASSED ================= 1 tests deselected by '-khttp or quick' ================== ================== 2 passed, 1 deselected in 0.01 seconds ================== .. note:: If you are using expressions such as "X and Y" then both X and Y need to be simple non-keyword names. For example, "pass" or "from" will result in SyntaxErrors because "-k" evaluates the expression. However, if the "-k" argument is a simple string, no such restrictions apply. Also "-k 'not STRING'" has no restrictions. 
You can also specify numbers like "-k 1.3" to match tests which are parametrized with the float "1.3". Registering markers ------------------------------------- .. versionadded:: 2.2 .. ini-syntax for custom markers: Registering markers for your test suite is simple:: # content of pytest.ini [pytest] markers = webtest: mark a test as a webtest. You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` markers:: $ py.test --markers @pytest.mark.webtest: mark a test as a webtest. @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html @pytest.mark.xfail(condition, reason=None, run=True): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. See http://pytest.org/latest/skipping.html @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. 
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. For an example on how to add and work with markers from a plugin, see :ref:`adding a custom marker from a plugin`. .. note:: It is recommended to explicitely register markers so that: * there is one place in your test suite defining your markers * asking for existing markers via ``py.test --markers`` gives good output * typos in function markers are treated as an error if you use the ``--strict`` option. Later versions of py.test are probably going to treat non-registered markers as an error. .. _`scoped-marking`: Marking whole classes or modules ---------------------------------------------------- If you are programming with Python 2.6 or later you may use ``pytest.mark`` decorators with classes to apply markers to all of its test methods:: # content of test_mark_classlevel.py import pytest @pytest.mark.webtest class TestClass: def test_startup(self): pass def test_startup_and_more(self): pass This is equivalent to directly applying the decorator to the two test functions. To remain backward-compatible with Python 2.4 you can also set a ``pytestmark`` attribute on a TestClass like this:: import pytest class TestClass: pytestmark = pytest.mark.webtest or if you need to use multiple markers you can use a list:: import pytest class TestClass: pytestmark = [pytest.mark.webtest, pytest.mark.slowtest] You can also set a module level marker:: import pytest pytestmark = pytest.mark.webtest in which case it will be applied to all functions and methods defined in the module. .. _`marking individual tests when using parametrize`: Marking individual tests when using parametrize ----------------------------------------------- When using parametrize, applying a mark will make it apply to each individual test. 
However it is also possible to apply a marker to an individual test instance:: import pytest @pytest.mark.foo @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.bar((1, 3)), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected In this example the mark "foo" will apply to each of the three tests, whereas the "bar" mark is only applied to the second test. Skip and xfail marks can also be applied in this way, see :ref:`skip/xfail with parametrize`. .. _`adding a custom marker from a plugin`: Custom marker and command line option to control test runs ---------------------------------------------------------- .. regendoc:wipe Plugins can provide custom markers and implement specific behaviour based on it. This is a self-contained example which adds a command line option and a parametrized test function marker to run tests specifies via named environments:: # content of conftest.py import pytest def pytest_addoption(parser): parser.addoption("-E", action="store", metavar="NAME", help="only run tests matching the environment NAME.") def pytest_configure(config): # register an additional marker config.addinivalue_line("markers", "env(name): mark test to run only on named environment") def pytest_runtest_setup(item): envmarker = item.get_marker("env") if envmarker is not None: envname = envmarker.args[0] if envname != item.config.getoption("-E"): pytest.skip("test requires env %r" % envname) A test file using this local plugin:: # content of test_someenv.py import pytest @pytest.mark.env("stage1") def test_basic_db_operation(): pass and an example invocations specifying a different environment than what the test needs:: $ py.test -E stage2 =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 1 items test_someenv.py s ======================== 1 skipped in 0.01 seconds ========================= and here is one that specifies exactly the environment needed:: $ 
py.test -E stage1 =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 1 items test_someenv.py . ========================= 1 passed in 0.01 seconds ========================= The ``--markers`` option always gives you a list of available markers:: $ py.test --markers @pytest.mark.env(name): mark test to run only on named environment @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html @pytest.mark.xfail(condition, reason=None, run=True): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. See http://pytest.org/latest/skipping.html @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. 
Reading markers which were set from multiple places ---------------------------------------------------- .. versionadded: 2.2.2 .. regendoc:wipe If you are heavily using markers in your test suite you may encounter the case where a marker is applied several times to a test function. From plugin code you can read over all such settings. Example:: # content of test_mark_three_times.py import pytest pytestmark = pytest.mark.glob("module", x=1) @pytest.mark.glob("class", x=2) class TestClass: @pytest.mark.glob("function", x=3) def test_something(self): pass Here we have the marker "glob" applied three times to the same test function. From a conftest file we can read it like this:: # content of conftest.py import sys def pytest_runtest_setup(item): g = item.get_marker("glob") if g is not None: for info in g: print ("glob args=%s kwargs=%s" %(info.args, info.kwargs)) sys.stdout.flush() Let's run this without capturing output and see what we get:: $ py.test -q -s glob args=('function',) kwargs={'x': 3} glob args=('class',) kwargs={'x': 2} glob args=('module',) kwargs={'x': 1} . 1 passed in 0.01 seconds marking platform specific tests with pytest -------------------------------------------------------------- .. regendoc:wipe Consider you have a test suite which marks tests for particular platforms, namely ``pytest.mark.osx``, ``pytest.mark.win32`` etc. and you also have tests that run on all platforms and have no specific marker. If you now want to have a way to only run the tests for your particular platform, you could use the following plugin:: # content of conftest.py # import sys import pytest ALL = set("osx linux2 win32".split()) def pytest_runtest_setup(item): if isinstance(item, item.Function): plat = sys.platform if not item.get_marker(plat): if ALL.intersection(item.keywords): pytest.skip("cannot run on platform %s" %(plat)) then tests will be skipped if they were specified for a different platform. 
Let's do a little test file to show how this looks like:: # content of test_plat.py import pytest @pytest.mark.osx def test_if_apple_is_evil(): pass @pytest.mark.linux2 def test_if_linux_works(): pass @pytest.mark.win32 def test_if_win32_crashes(): pass def test_runs_everywhere(): pass then you will see two test skipped and two executed tests as expected:: $ py.test -rs # this option reports skip reasons =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 4 items test_plat.py s.s. ========================= short test summary info ========================== SKIP [2] /tmp/doc-exec-63/conftest.py:12: cannot run on platform linux2 =================== 2 passed, 2 skipped in 0.01 seconds ==================== Note that if you specify a platform via the marker-command line option like this:: $ py.test -m linux2 =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 4 items test_plat.py . =================== 3 tests deselected by "-m 'linux2'" ==================== ================== 1 passed, 3 deselected in 0.01 seconds ================== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. Automatically adding markers based on test names -------------------------------------------------------- .. regendoc:wipe If you a test suite where test function names indicate a certain type of test, you can implement a hook that automatically defines markers so that you can use the ``-m`` option with it. 
Let's look at this test module:: # content of test_module.py def test_interface_simple(): assert 0 def test_interface_complex(): assert 0 def test_event_simple(): assert 0 def test_something_else(): assert 0 We want to dynamically define two markers and can do it in a ``conftest.py`` plugin:: # content of conftest.py import pytest def pytest_collection_modifyitems(items): for item in items: if "interface" in item.nodeid: item.add_marker(pytest.mark.interface) elif "event" in item.nodeid: item.add_marker(pytest.mark.event) We can now use the ``-m option`` to select one set:: $ py.test -m interface --tb=short =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 4 items test_module.py FF ================================= FAILURES ================================= __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple > assert 0 E assert 0 __________________________ test_interface_complex __________________________ test_module.py:6: in test_interface_complex > assert 0 E assert 0 ================== 2 tests deselected by "-m 'interface'" ================== ================== 2 failed, 2 deselected in 0.01 seconds ================== or to select both "event" and "interface" tests:: $ py.test -m "interface or event" --tb=short =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 4 items test_module.py FFF ================================= FAILURES ================================= __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple > assert 0 E assert 0 __________________________ test_interface_complex __________________________ test_module.py:6: in test_interface_complex > assert 0 E assert 0 ____________________________ test_event_simple _____________________________ 
test_module.py:9: in test_event_simple > assert 0 E assert 0 ============= 1 tests deselected by "-m 'interface or event'" ============== ================== 3 failed, 1 deselected in 0.01 seconds ================== pytest-2.5.1/doc/en/example/nonpython/0000775000175000017500000000000012254002202017262 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/example/nonpython/__init__.py0000664000175000017500000000000012254002202021361 0ustar hpkhpk00000000000000pytest-2.5.1/doc/en/example/nonpython/conftest.py0000664000175000017500000000247112254002202021465 0ustar hpkhpk00000000000000# content of conftest.py import pytest def pytest_collect_file(parent, path): if path.ext == ".yml" and path.basename.startswith("test"): return YamlFile(path, parent) class YamlFile(pytest.File): def collect(self): import yaml # we need a yaml parser, e.g. PyYAML raw = yaml.safe_load(self.fspath.open()) for name, spec in raw.items(): yield YamlItem(name, self, spec) class YamlItem(pytest.Item): def __init__(self, name, parent, spec): super(YamlItem, self).__init__(name, parent) self.spec = spec def runtest(self): for name, value in self.spec.items(): # some custom test execution (dumb example follows) if name != value: raise YamlException(self, name, value) def repr_failure(self, excinfo): """ called when self.runtest() raises an exception. """ if isinstance(excinfo.value, YamlException): return "\n".join([ "usecase execution failed", " spec failed: %r: %r" % excinfo.value.args[1:3], " no further details known at this point." ]) def reportinfo(self): return self.fspath, 0, "usecase: %s" % self.name class YamlException(Exception): """ custom exception for error reporting. 
""" pytest-2.5.1/doc/en/example/nonpython/test_simple.yml0000664000175000017500000000011612254002202022333 0ustar hpkhpk00000000000000# test_simple.yml ok: sub1: sub1 hello: world: world some: other pytest-2.5.1/doc/en/example/multipython.py0000664000175000017500000000325712254002202020203 0ustar hpkhpk00000000000000""" module containing a parametrized tests testing cross-python serialization via the pickle module. """ import py, pytest pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8'] @pytest.fixture(params=pythonlist) def python1(request, tmpdir): picklefile = tmpdir.join("data.pickle") return Python(request.param, picklefile) @pytest.fixture(params=pythonlist) def python2(request, python1): return Python(request.param, python1.picklefile) class Python: def __init__(self, version, picklefile): self.pythonpath = py.path.local.sysfind(version) if not self.pythonpath: py.test.skip("%r not found" %(version,)) self.picklefile = picklefile def dumps(self, obj): dumpfile = self.picklefile.dirpath("dump.py") dumpfile.write(py.code.Source(""" import pickle f = open(%r, 'wb') s = pickle.dump(%r, f) f.close() """ % (str(self.picklefile), obj))) py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile)) def load_and_is_true(self, expression): loadfile = self.picklefile.dirpath("load.py") loadfile.write(py.code.Source(""" import pickle f = open(%r, 'rb') obj = pickle.load(f) f.close() res = eval(%r) if not res: raise SystemExit(1) """ % (str(self.picklefile), expression))) print (loadfile) py.process.cmdexec("%s %s" %(self.pythonpath, loadfile)) @pytest.mark.parametrize("obj", [42, {}, {1:3},]) def test_basic_objects(python1, python2, obj): python1.dumps(obj) python2.load_and_is_true("obj == %s" % obj) pytest-2.5.1/doc/en/example/simple.txt0000664000175000017500000005211212254002202017261 0ustar hpkhpk00000000000000 .. 
highlightlang:: python Basic patterns and examples ========================================================== Pass different values to a test function, depending on command line options ---------------------------------------------------------------------------- .. regendoc:wipe Suppose we want to write a test that depends on a command line option. Here is a basic pattern how to achieve this:: # content of test_sample.py def test_answer(cmdopt): if cmdopt == "type1": print ("first") elif cmdopt == "type2": print ("second") assert 0 # to see what was printed For this to work we need to add a command line option and provide the ``cmdopt`` through a :ref:`fixture function `:: # content of conftest.py import pytest def pytest_addoption(parser): parser.addoption("--cmdopt", action="store", default="type1", help="my option: type1 or type2") @pytest.fixture def cmdopt(request): return request.config.getoption("--cmdopt") Let's run this without supplying our new option:: $ py.test -q test_sample.py F ================================= FAILURES ================================= _______________________________ test_answer ________________________________ cmdopt = 'type1' def test_answer(cmdopt): if cmdopt == "type1": print ("first") elif cmdopt == "type2": print ("second") > assert 0 # to see what was printed E assert 0 test_sample.py:6: AssertionError ----------------------------- Captured stdout ------------------------------ first 1 failed in 0.01 seconds And now with supplying a command line option:: $ py.test -q --cmdopt=type2 F ================================= FAILURES ================================= _______________________________ test_answer ________________________________ cmdopt = 'type2' def test_answer(cmdopt): if cmdopt == "type1": print ("first") elif cmdopt == "type2": print ("second") > assert 0 # to see what was printed E assert 0 test_sample.py:6: AssertionError ----------------------------- Captured stdout ------------------------------ second 1 failed 
in 0.01 seconds You can see that the command line option arrived in our test. This completes the basic pattern. However, one often rather wants to process command line options outside of the test and rather pass in different or more complex objects. Dynamically adding command line options -------------------------------------------------------------- .. regendoc:wipe Through :confval:`addopts` you can statically add command line options for your project. You can also dynamically modify the command line arguments before they get processed:: # content of conftest.py import sys def pytest_cmdline_preparse(args): if 'xdist' in sys.modules: # pytest-xdist plugin import multiprocessing num = max(multiprocessing.cpu_count() / 2, 1) args[:] = ["-n", str(num)] + args If you have the :ref:`xdist plugin ` installed you will now always perform test runs using a number of subprocesses close to your CPU. Running in an empty directory with the above conftest.py:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 0 items ============================= in 0.00 seconds ============================= .. _`excontrolskip`: Control skipping of tests according to command line option -------------------------------------------------------------- .. 
regendoc:wipe Here is a ``conftest.py`` file adding a ``--runslow`` command line option to control skipping of ``slow`` marked tests:: # content of conftest.py import pytest def pytest_addoption(parser): parser.addoption("--runslow", action="store_true", help="run slow tests") def pytest_runtest_setup(item): if 'slow' in item.keywords and not item.config.getoption("--runslow"): pytest.skip("need --runslow option to run") We can now write a test module like this:: # content of test_module.py import pytest slow = pytest.mark.slow def test_func_fast(): pass @slow def test_func_slow(): pass and when running it will see a skipped "slow" test:: $ py.test -rs # "-rs" means report details on the little 's' =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 2 items test_module.py .s ========================= short test summary info ========================== SKIP [1] /tmp/doc-exec-68/conftest.py:9: need --runslow option to run =================== 1 passed, 1 skipped in 0.01 seconds ==================== Or run it including the ``slow`` marked test:: $ py.test --runslow =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 2 items test_module.py .. ========================= 2 passed in 0.01 seconds ========================= Writing well integrated assertion helpers -------------------------------------------------- .. regendoc:wipe If you have a test helper function called from a test you can use the ``pytest.fail`` marker to fail a test with a certain message. The test support function will not show up in the traceback if you set the ``__tracebackhide__`` option somewhere in the helper function. 
Example:: # content of test_checkconfig.py import pytest def checkconfig(x): __tracebackhide__ = True if not hasattr(x, "config"): pytest.fail("not configured: %s" %(x,)) def test_something(): checkconfig(42) The ``__tracebackhide__`` setting influences py.test showing of tracebacks: the ``checkconfig`` function will not be shown unless the ``--fulltrace`` command line option is specified. Let's run our little function:: $ py.test -q test_checkconfig.py F ================================= FAILURES ================================= ______________________________ test_something ______________________________ def test_something(): > checkconfig(42) E Failed: not configured: 42 test_checkconfig.py:8: Failed 1 failed in 0.01 seconds Detect if running from within a py.test run -------------------------------------------------------------- .. regendoc:wipe Usually it is a bad idea to make application code behave differently if called from a test. But if you absolutely must find out if your application code is running from a test you can do something like this:: # content of conftest.py def pytest_configure(config): import sys sys._called_from_test = True def pytest_unconfigure(config): del sys._called_from_test and then check for the ``sys._called_from_test`` flag:: if hasattr(sys, '_called_from_test'): # called from within a test run else: # called "normally" accordingly in your application. It's also a good idea to use your own application module rather than ``sys`` for handling flag. Adding info to test report header -------------------------------------------------------------- .. 
regendoc:wipe It's easy to present extra information in a py.test run:: # content of conftest.py def pytest_report_header(config): return "project deps: mylib-1.1" which will add the string to the test header accordingly:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 project deps: mylib-1.1 collected 0 items ============================= in 0.00 seconds ============================= .. regendoc:wipe You can also return a list of strings which will be considered as several lines of information. You can of course also make the amount of reporting information on e.g. the value of ``config.option.verbose`` so that you present more information appropriately:: # content of conftest.py def pytest_report_header(config): if config.option.verbose > 0: return ["info1: did you know that ...", "did you?"] which will add info only when run with "--v":: $ py.test -v =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python info1: did you know that ... did you? collecting ... collected 0 items ============================= in 0.00 seconds ============================= and nothing when run plainly:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 0 items ============================= in 0.00 seconds ============================= profiling test duration -------------------------- .. regendoc:wipe .. versionadded: 2.2 If you have a slow running large test suite you might want to find out which tests are the slowest. 
Let's make an artificial test suite::
Here is a test module example:: # content of test_step.py import pytest @pytest.mark.incremental class TestUserHandling: def test_login(self): pass def test_modification(self): assert 0 def test_deletion(self): pass def test_normal(): pass If we run this:: $ py.test -rx =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 4 items test_step.py .Fx. ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ self = def test_modification(self): > assert 0 E assert 0 test_step.py:9: AssertionError ========================= short test summary info ========================== XFAIL test_step.py::TestUserHandling::()::test_deletion reason: previous test failed (test_modification) ============== 1 failed, 2 passed, 1 xfailed in 0.01 seconds =============== We'll see that ``test_deletion`` was not executed because ``test_modification`` failed. It is reported as an "expected failure". Package/Directory-level fixtures (setups) ------------------------------------------------------- If you have nested test directories, you can have per-directory fixture scopes by placing fixture functions in a ``conftest.py`` file in that directory You can use all types of fixtures including :ref:`autouse fixtures ` which are the equivalent of xUnit's setup/teardown concept. It's however recommended to have explicit fixture references in your tests or test classes rather than relying on implicitely executing setup/teardown functions, especially if they are far away from the actual tests. 
Here is an example for making a ``db`` fixture available in a directory::
/tmp/doc-exec-68/b/test_error.py:1 ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ self = def test_modification(self): > assert 0 E assert 0 test_step.py:9: AssertionError _________________________________ test_a1 __________________________________ db = def test_a1(db): > assert 0, db # to show value E AssertionError: a/test_db.py:2: AssertionError _________________________________ test_a2 __________________________________ db = def test_a2(db): > assert 0, db # to show value E AssertionError: a/test_db2.py:2: AssertionError ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.03 seconds ========== The two test modules in the ``a`` directory see the same ``db`` fixture instance while the one test in the sister-directory ``b`` doesn't see it. We could of course also define a ``db`` fixture in that sister directory's ``conftest.py`` file. Note that each fixture is only instantiated if there is a test actually needing it (unless you use "autouse" fixture which are always executed ahead of the first test executing). post-process test reports / failures --------------------------------------- If you want to postprocess test reports and need access to the executing environment you can implement a hook that gets called when the test "report" object is about to be created. Here we write out all failing test calls and also access a fixture (if it was used by the test) in case you want to query/look at it during your post processing. 
In our case we just write some information out to a ``failures`` file::
regendoc:wipe If you want to make test result reports available in fixture finalizers here is a little example implemented via a local plugin:: # content of conftest.py import pytest @pytest.mark.tryfirst def pytest_runtest_makereport(item, call, __multicall__): # execute all other hooks to obtain the report object rep = __multicall__.execute() # set an report attribute for each phase of a call, which can # be "setup", "call", "teardown" setattr(item, "rep_" + rep.when, rep) return rep @pytest.fixture def something(request): def fin(): # request.node is an "item" because we use the default # "function" scope if request.node.rep_setup.failed: print "setting up a test failed!", request.node.nodeid elif request.node.rep_setup.passed: if request.node.rep_call.failed: print "executing test failed", request.node.nodeid request.addfinalizer(fin) if you then have failing tests:: # content of test_module.py import pytest @pytest.fixture def other(): assert 0 def test_setup_fails(something, other): pass def test_call_fails(something): assert 0 def test_fail2(): assert 0 and run it:: $ py.test -s test_module.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 3 items test_module.py Esetting up a test failed! 
test_module.py::test_setup_fails Fexecuting test failed test_module.py::test_call_fails F ================================== ERRORS ================================== ____________________ ERROR at setup of test_setup_fails ____________________ @pytest.fixture def other(): > assert 0 E assert 0 test_module.py:6: AssertionError ================================= FAILURES ================================= _____________________________ test_call_fails ______________________________ something = None def test_call_fails(something): > assert 0 E assert 0 test_module.py:12: AssertionError ________________________________ test_fail2 ________________________________ def test_fail2(): > assert 0 E assert 0 test_module.py:15: AssertionError ==================== 2 failed, 1 error in 0.01 seconds ===================== You'll see that the fixture finalizers could use the precise reporting information. pytest-2.5.1/doc/en/example/pythoncollection.py0000664000175000017500000000030712254002202021175 0ustar hpkhpk00000000000000 # run this with $ py.test --collect-only test_collectonly.py # def test_function(): pass class TestClass: def test_method(self): pass def test_anothermethod(self): pass pytest-2.5.1/doc/en/example/assertion/0000775000175000017500000000000012254002202017235 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/example/assertion/test_setup_flow_example.py0000664000175000017500000000234212254002202024551 0ustar hpkhpk00000000000000def setup_module(module): module.TestStateFullThing.classcount = 0 class TestStateFullThing: def setup_class(cls): cls.classcount += 1 def teardown_class(cls): cls.classcount -= 1 def setup_method(self, method): self.id = eval(method.__name__[5:]) def test_42(self): assert self.classcount == 1 assert self.id == 42 def test_23(self): assert self.classcount == 1 assert self.id == 23 def teardown_module(module): assert module.TestStateFullThing.classcount == 0 """ For this example the control flow happens as follows:: import 
test_setup_flow_example setup_module(test_setup_flow_example) setup_class(TestStateFullThing) instance = TestStateFullThing() setup_method(instance, instance.test_42) instance.test_42() setup_method(instance, instance.test_23) instance.test_23() teardown_class(TestStateFullThing) teardown_module(test_setup_flow_example) Note that ``setup_class(TestStateFullThing)`` is called and not ``TestStateFullThing.setup_class()`` which would require you to insert ``setup_class = classmethod(setup_class)`` to make your setup function callable. """ pytest-2.5.1/doc/en/example/assertion/global_testmodule_config/0000775000175000017500000000000012254002202024267 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/example/assertion/global_testmodule_config/conftest.py0000664000175000017500000000050712254002202026470 0ustar hpkhpk00000000000000import pytest, py mydir = py.path.local(__file__).dirpath() def pytest_runtest_setup(item): if isinstance(item, pytest.Function): if not item.fspath.relto(mydir): return mod = item.getparent(pytest.Module).obj if hasattr(mod, 'hello'): print ("mod.hello %r" % (mod.hello,)) pytest-2.5.1/doc/en/example/assertion/global_testmodule_config/test_hello.py0000664000175000017500000000005412254002202027002 0ustar hpkhpk00000000000000 hello = "world" def test_func(): pass pytest-2.5.1/doc/en/example/assertion/test_failures.py0000664000175000017500000000066312254002202022465 0ustar hpkhpk00000000000000 import py failure_demo = py.path.local(__file__).dirpath('failure_demo.py') pytest_plugins = 'pytester', def test_failure_demo_fails_properly(testdir): target = testdir.tmpdir.join(failure_demo.basename) failure_demo.copy(target) failure_demo.copy(testdir.tmpdir.join(failure_demo.basename)) result = testdir.runpytest(target) result.stdout.fnmatch_lines([ "*39 failed*" ]) assert result.ret != 0 pytest-2.5.1/doc/en/example/assertion/failure_demo.py0000664000175000017500000001103212254002202022237 0ustar hpkhpk00000000000000from py.test import raises import py def 
otherfunc(a,b): assert a==b def somefunc(x,y): otherfunc(x,y) def otherfunc_multi(a,b): assert (a == b) def test_generative(param1, param2): assert param1 * 2 < param2 def pytest_generate_tests(metafunc): if 'param1' in metafunc.fixturenames: metafunc.addcall(funcargs=dict(param1=3, param2=6)) class TestFailing(object): def test_simple(self): def f(): return 42 def g(): return 43 assert f() == g() def test_simple_multiline(self): otherfunc_multi( 42, 6*9) def test_not(self): def f(): return 42 assert not f() class TestSpecialisedExplanations(object): def test_eq_text(self): assert 'spam' == 'eggs' def test_eq_similar_text(self): assert 'foo 1 bar' == 'foo 2 bar' def test_eq_multiline_text(self): assert 'foo\nspam\nbar' == 'foo\neggs\nbar' def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 b = '1'*100 + 'b' + '2'*100 assert a == b def test_eq_long_text_multiline(self): a = '1\n'*100 + 'a' + '2\n'*100 b = '1\n'*100 + 'b' + '2\n'*100 assert a == b def test_eq_list(self): assert [0, 1, 2] == [0, 1, 3] def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 b = [0]*100 + [2] + [3]*100 assert a == b def test_eq_dict(self): assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} def test_eq_set(self): assert set([0, 10, 11, 12]) == set([0, 20, 21]) def test_eq_longer_list(self): assert [1,2] == [1,2,3] def test_in_list(self): assert 1 in [0, 2, 3, 4, 5] def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' assert 'foo' not in text def test_not_in_text_single(self): text = 'single foo line' assert 'foo' not in text def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 assert 'foo' not in text def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 assert 'f'*70 not in text def test_attribute(): class Foo(object): b = 1 i = Foo() assert i.b == 2 def test_attribute_instance(): class Foo(object): b = 1 assert Foo().b == 2 def test_attribute_failure(): 
class Foo(object): def _get_b(self): raise Exception('Failed to get attrib') b = property(_get_b) i = Foo() assert i.b == 2 def test_attribute_multiple(): class Foo(object): b = 1 class Bar(object): b = 2 assert Foo().b == Bar().b def globf(x): return x+1 class TestRaises: def test_raises(self): s = 'qwe' raises(TypeError, "int(s)") def test_raises_doesnt(self): raises(IOError, "int('3')") def test_raise(self): raise ValueError("demo error") def test_tupleerror(self): a,b = [1] def test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] print ("l is %r" % l) a,b = l.pop() def test_some_error(self): if namenotexi: pass def func1(self): assert 41 == 42 # thanks to Matthew Scott for this test def test_dynamic_compile_shows_nicely(): src = 'def foo():\n assert 1 == 0\n' name = 'abc-123' module = py.std.imp.new_module(name) code = py.code.compile(src, name, 'exec') py.builtin.exec_(code, module.__dict__) py.std.sys.modules[name] = module module.foo() class TestMoreErrors: def test_complex_error(self): def f(): return 44 def g(): return 43 somefunc(f(), g()) def test_z1_unpack_error(self): l = [] a,b = l def test_z2_type_error(self): l = 3 a,b = l def test_startswith(self): s = "123" g = "456" assert s.startswith(g) def test_startswith_nested(self): def f(): return "123" def g(): return "456" assert f().startswith(g()) def test_global_func(self): assert isinstance(globf(42), float) def test_instance(self): self.x = 6*7 assert self.x != 42 def test_compare(self): assert globf(10) < 5 def test_try_finally(self): x = 1 try: assert x == 0 finally: x = 0 pytest-2.5.1/doc/en/example/pythoncollection.txt0000664000175000017500000001163112254002202021366 0ustar hpkhpk00000000000000Changing standard (Python) test discovery =============================================== Changing directory recursion ----------------------------------------------------- You can set the :confval:`norecursedirs` option in an ini-file, for example your ``setup.cfg`` in the project root 
directory:: # content of setup.cfg [pytest] norecursedirs = .svn _build tmp* This would tell py.test to not recurse into typical subversion or sphinx-build directories or into any ``tmp`` prefixed directory. .. _`change naming conventions`: Changing naming conventions ----------------------------------------------------- You can configure different naming conventions by setting the :confval:`python_files`, :confval:`python_classes` and :confval:`python_functions` configuration options. Example:: # content of setup.cfg # can also be defined in in tox.ini or pytest.ini file [pytest] python_files=check_*.py python_classes=Check python_functions=check This would make py.test look for ``check_`` prefixes in Python filenames, ``Check`` prefixes in classes and ``check`` prefixes in functions and classes. For example, if we have:: # content of check_myapp.py class CheckMyApp: def check_simple(self): pass def check_complex(self): pass then the test collection looks like this:: $ py.test --collect-only =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 2 items ============================= in 0.01 seconds ============================= .. note:: the ``python_functions`` and ``python_classes`` has no effect for ``unittest.TestCase`` test discovery because pytest delegates detection of test case methods to unittest code. Interpreting cmdline arguments as Python packages ----------------------------------------------------- You can use the ``--pyargs`` option to make py.test try interpreting arguments as python package names, deriving their file system path and then running the test. For example if you have unittest2 installed you can type:: py.test --pyargs unittest2.test.test_skipping -q which would run the respective test module. 
Like with other options, through an ini-file and the :confval:`addopts` option you can make this change more permanently:: # content of pytest.ini [pytest] addopts = --pyargs Now a simple invocation of ``py.test NAME`` will check if NAME exists as an importable package/module and otherwise treat it as a filesystem path. Finding out what is collected ----------------------------------------------- You can always peek at the collection tree without running tests like this:: . $ py.test --collect-only pythoncollection.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 3 items ============================= in 0.01 seconds ============================= customizing test collection to find all .py files --------------------------------------------------------- .. regendoc:wipe You can easily instruct py.test to discover tests from every python file:: # content of pytest.ini [pytest] python_files = *.py However, many projects will have a ``setup.py`` which they don't want to be imported. Moreover, there may files only importable by a specific python version. 
For such cases you can dynamically define files to be ignored by listing them in a ``conftest.py`` file:: # content of conftest.py import sys collect_ignore = ["setup.py"] if sys.version_info[0] > 2: collect_ignore.append("pkg/module_py2.py") And then if you have a module file like this:: # content of pkg/module_py2.py def test_only_on_python2(): try: assert 0 except Exception, e: pass and a setup.py dummy file like this:: # content of setup.py 0/0 # will raise exeption if imported then a pytest run on python2 will find the one test when run with a python2 interpreters and will leave out the setup.py file:: $ py.test --collect-only =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 1 items ============================= in 0.01 seconds ============================= If you run with a Python3 interpreter the moduled added through the conftest.py file will not be considered for test collection. pytest-2.5.1/doc/en/example/nonpython.txt0000664000175000017500000000625312254002202020031 0ustar hpkhpk00000000000000 .. _`non-python tests`: Working with non-python tests ==================================================== .. _`yaml plugin`: A basic example for specifying tests in Yaml files -------------------------------------------------------------- .. _`pytest-yamlwsgi`: http://bitbucket.org/aafshar/pytest-yamlwsgi/src/tip/pytest_yamlwsgi.py .. _`PyYAML`: http://pypi.python.org/pypi/PyYAML/ Here is an example ``conftest.py`` (extracted from Ali Afshnars special purpose `pytest-yamlwsgi`_ plugin). This ``conftest.py`` will collect ``test*.yml`` files and will execute the yaml-formatted content as custom tests: .. include:: nonpython/conftest.py :literal: You can create a simple example file: .. 
include:: nonpython/test_simple.yml :literal: and if you installed `PyYAML`_ or a compatible YAML-parser you can now execute the test specification:: nonpython $ py.test test_simple.yml =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 2 items test_simple.yml .F ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. ==================== 1 failed, 1 passed in 0.03 seconds ==================== You get one dot for the passing ``sub1: sub1`` check and one failure. Obviously in the above ``conftest.py`` you'll want to implement a more interesting interpretation of the yaml-values. You can easily write your own domain specific testing language this way. .. note:: ``repr_failure(excinfo)`` is called for representing test failures. If you create custom collection nodes you can return an error representation string of your choice. It will be reported as a (red) string. ``reportinfo()`` is used for representing the test location and is also consulted when reporting in ``verbose`` mode:: nonpython $ py.test -v =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 2 items test_simple.yml:1: usecase: ok PASSED test_simple.yml:1: usecase: hello FAILED ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. 
==================== 1 failed, 1 passed in 0.03 seconds ==================== While developing your custom test collection and execution it's also interesting to just look at the collection tree:: nonpython $ py.test --collect-only =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 2 items ============================= in 0.02 seconds ============================= pytest-2.5.1/doc/en/yieldfixture.txt0000664000175000017500000001056712254002202017062 0ustar hpkhpk00000000000000 .. _yieldfixture: Fixture functions using "yield" / context manager integration --------------------------------------------------------------- .. versionadded:: 2.4 .. regendoc:wipe pytest-2.4 allows fixture functions to seamlessly use a ``yield`` instead of a ``return`` statement to provide a fixture value while otherwise fully supporting all other fixture features. .. note:: "yielding" fixture values is an experimental feature and its exact declaration may change later but earliest in a 2.5 release. You can thus safely use this feature in the 2.4 series but may need to adapt later. Test functions themselves will not need to change (as a general feature, they are ignorant of how fixtures are setup). Let's look at a simple standalone-example using the new ``yield`` syntax:: # content of test_yield.py import pytest @pytest.yield_fixture def passwd(): print ("\nsetup before yield") f = open("/etc/passwd") yield f.readlines() print ("teardown after yield") f.close() def test_has_lines(passwd): print ("test called") assert passwd In contrast to :ref:`finalization through registering callbacks `, our fixture function used a ``yield`` statement to provide the lines of the ``/etc/passwd`` file. The code after the ``yield`` statement serves as the teardown code, avoiding the indirection of registering a teardown callback function. 
Let's run it with output capturing disabled:: $ py.test -q -s test_yield.py setup before yield test called .teardown after yield 1 passed in 0.00 seconds We can also seemlessly use the new syntax with ``with`` statements. Let's simplify the above ``passwd`` fixture:: # content of test_yield2.py import pytest @pytest.yield_fixture def passwd(): with open("/etc/passwd") as f: yield f.readlines() def test_has_lines(passwd): assert len(passwd) >= 1 The file ``f`` will be closed after the test finished execution because the Python ``file`` object supports finalization when the ``with`` statement ends. Note that the new syntax is fully integrated with using ``scope``, ``params`` and other fixture features. Changing existing fixture functions to use ``yield`` is thus straight forward. Discussion and future considerations / feedback ++++++++++++++++++++++++++++++++++++++++++++++++++++ The yield-syntax has been discussed by pytest users extensively. In general, the advantages of the using a ``yield`` fixture syntax are: - easy provision of fixtures in conjunction with context managers. - no need to register a callback, providing for more synchronous control flow in the fixture function. Also there is no need to accept the ``request`` object into the fixture function just for providing finalization code. However, there are also limitations or foreseeable irritations: - usually ``yield`` is used for producing multiple values. But fixture functions can only yield exactly one value. Yielding a second fixture value will get you an error. It's possible we can evolve pytest to allow for producing multiple values as an alternative to current parametrization. For now, you can just use the normal :ref:`fixture parametrization ` mechanisms together with ``yield``-style fixtures. - the ``yield`` syntax is similar to what :py:func:`contextlib.contextmanager` decorated functions provide. 
With pytest fixture functions, the "after yield" part will always be invoked, independently from the exception status of the test function which uses the fixture. The pytest behaviour makes sense if you consider that many different test functions might use a module or session scoped fixture. Some test functions might raise exceptions and others not, so how could pytest re-raise a single exception at the ``yield`` point in the fixture function? - lastly ``yield`` introduces more than one way to write fixture functions, so what's the obvious way to a newcomer? Newcomers reading the docs will see feature examples using the ``return`` style so should use that, if in doubt. Others can start experimenting with writing yield-style fixtures and possibly help evolving them further. If you want to feedback or participate in the ongoing discussion, please join our :ref:`contact channels`. you are most welcome. pytest-2.5.1/doc/en/xunit_setup.txt0000664000175000017500000000617412254002202016733 0ustar hpkhpk00000000000000 .. _`classic xunit`: .. _xunitsetup: classic xunit-style setup ======================================== This section describes a classic and popular way how you can implement fixtures (setup and teardown test state) on a per-module/class/function basis. pytest started supporting these methods around 2005 and subsequently nose and the standard library introduced them (under slightly different names). While these setup/teardown methods are and will remain fully supported you may also use pytest's more powerful :ref:`fixture mechanism ` which leverages the concept of dependency injection, allowing for a more modular and more scalable approach for managing test state, especially for larger projects and for functional testing. You can mix both fixture mechanisms in the same file but unittest-based test methods cannot receive fixture arguments. .. note:: As of pytest-2.4, teardownX functions are not called if setupX existed and failed/was skipped. 
This harmonizes behaviour across all major python testing tools. Module level setup/teardown -------------------------------------- If you have multiple test functions and test classes in a single module you can optionally implement the following fixture methods which will usually be called once for all the functions:: def setup_module(module): """ setup any state specific to the execution of the given module.""" def teardown_module(module): """ teardown any state that was previously setup with a setup_module method. """ Class level setup/teardown ---------------------------------- Similarly, the following methods are called at class level before and after all test methods of the class are called:: @classmethod def setup_class(cls): """ setup any state specific to the execution of the given class (which usually contains tests). """ @classmethod def teardown_class(cls): """ teardown any state that was previously setup with a call to setup_class. """ Method and function level setup/teardown ----------------------------------------------- Similarly, the following methods are called around each method invocation:: def setup_method(self, method): """ setup any state tied to the execution of the given method in a class. setup_method is invoked for every test method of a class. """ def teardown_method(self, method): """ teardown any state that was previously setup with a setup_method call. """ If you would rather define test functions directly at module level you can also use the following functions to implement fixtures:: def setup_function(function): """ setup any state tied to the execution of the given function. Invoked for every test function in the module. """ def teardown_function(function): """ teardown any state that was previously setup with a setup_function call. """ Note that it is possible for setup/teardown pairs to be invoked multiple times per testing process. .. 
_`unittest.py module`: http://docs.python.org/library/unittest.html pytest-2.5.1/doc/en/funcargs.txt0000664000175000017500000000067212254002202016151 0ustar hpkhpk00000000000000 ======================================================= funcargs: resource injection and parametrization ======================================================= pytest-2.3 introduces major refinements to fixture management of which the funcarg mechanism introduced with pytest-2.0 remains a core part. The documentation has been refactored as well and you can read on here: - :ref:`fixtures` - :ref:`parametrize` - :ref:`funcargcompare` pytest-2.5.1/doc/en/conf.py0000664000175000017500000002334712254002202015103 0ustar hpkhpk00000000000000# -*- coding: utf-8 -*- # # pytest documentation build configuration file, created by # sphinx-quickstart on Fri Oct 8 17:54:28 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. # The short X.Y version. version = "2.5.1" release = "2.5.1" import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) autodoc_member_order = "bysource" todo_include_todos = 1 # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. 
#needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.txt' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General information about the project. project = u'pytest' copyright = u'2012, holger krekel' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['links.inc', '_build', 'naming20.txt', 'test/*', "old_*", '*attic*', '*/attic*', 'funcargs.txt', 'setup.txt', 'example/remoteinterp.txt', ] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
#modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- sys.path.append(os.path.abspath('_themes')) html_theme_path = ['_themes'] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'flask' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'index_logo': None } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = None # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = "pytest-%s" % release # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
#html_sidebars = {} #html_sidebars = {'index': 'indexsidebar.html'} html_sidebars = { 'index': [ 'sidebarintro.html', 'globaltoc.html', 'links.html', 'sourcelink.html', 'searchbox.html' ], '**': [ 'globaltoc.html', 'relations.html', 'links.html', 'sourcelink.html', 'searchbox.html' ] } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} #html_additional_pages = {'index': 'index.html'} # If false, no module index is generated. html_domain_indices = True # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'pytestdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('contents', 'pytest.tex', u'pytest Documentation', u'holger krekel, http://merlinux.eu', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. latex_domain_indices = False # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('usage', 'pytest', u'pytest usage', [u'holger krekel at merlinux eu'], 1) ] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'pytest' epub_author = u'holger krekel at merlinux eu' epub_publisher = u'holger krekel at merlinux eu' epub_copyright = u'2012, holger krekel et alii' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. 
#epub_tocdup = True # -- Options for texinfo output ------------------------------------------------ texinfo_documents = [ (master_doc, 'pytest', 'pytest Documentation', ('Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*' 'Floris Bruynooghe@*others'), 'pytest', 'simple powerful testing with Pytho', 'Programming', 1), ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'python': ('http://docs.python.org/', None), # 'lib': ("http://docs.python.org/2.7library/", None), } def setup(app): #from sphinx.ext.autodoc import cut_lines #app.connect('autodoc-process-docstring', cut_lines(4, what=['module'])) app.add_description_unit('confval', 'confval', objname='configuration value', indextemplate='pair: %s; configuration value') pytest-2.5.1/doc/en/img/0000775000175000017500000000000012254002202014347 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/img/keleshev.png0000664000175000017500000005531612254002202016675 0ustar hpkhpk00000000000000‰PNG  IHDR+—[[ŒXsRGB®ÎébKGDÿÿÿ ½§“ pHYs  šœtIMEÝ :iuÜ IDATxÚìw€\Uù÷ŸSn™º½d³%½’ )ô!$!¥©¨ˆ"Š ¢ ¼ˆâ "½‹@B  ¤‡dÓË&»›ÍîfûÌN¹õ”÷Ù e£ Ïç¯Ý™{OŸó½Ïsž{’R‚B¡P(ÿu°j…B¡P(R( …R …B¡P(”) …B)B¡P(J …B¡H¡P(Šz×J`¶ÏlŸyb4™´«7ïxgéúêMuºF† ©5¬¼4/œkšÃûWÁ@f‰æ\ðuÓ£†ÏüL[KÝ‚êê­ Ìó’i{{W¼•¹šFì´å¤æ3BhÈ ú¶Ÿ´—3Æâ"'°}?m»†©Gáþ}L9ý܋λHÓ4Õ …BñõòT)¤·„/Æ„j–ã­«i\¹aûŽÖ85pß’œ®„ûWäç òÂBŒ(¥˜P@„Œ<Œ3ÉÝŒ“xvÞk‹6¬ ›$OÄ»í­ÝI&ßaÌñÐtÚÉø  !&<Ç÷!#Â÷¸çøƒû ~röS¥Å¥ªG …âð²$8qB2¶¿½©uõ–†íÍíD£ƒû÷+˯êS€$tv¥"A=@ ‚SªI€Dwwk[kKkKwww8)-Ìï[ÍÍÓ1B5#Zù~+³p~(\X–ƒhWÇ]Óœ1àÒó9rƒœPÈóùŽ–6É8p@SŒ)ÅHˆÕ«ª¿~å%¯ÿã ]×U§* Åá£@¾ís_:ž×Ö•¬©oÛR×’ñœHÄ1°ïÊ’üHÀÐ5Ïg:Á˜ J ¬Í#·ÕխݰvsͦƦ‰D" õ)(uÄ€£cÃ(§—VN4¬-ÝHP3š·J˜Ô0 ˜ïû¾fÁ9âR0Î…ÀAŒ‡4 †®CÝœf{ãÆõO<óÄ•—_©:U¡P(r3²#žZ½©a}M£¦áãŽ6 ¼¨0'¬aÌ|΂GÂ¥$p!0!;[[ÞxûÍ%+–4µ69ŽÅgœI«Ö•´·M M=­¼¬hpYU¾I»’ÖÖÖε­MTÓ 3šHvaŠÇó%pêRŠÎd²=‘”Qæ„} s S\›Û>z¦Ÿñÿùò‹J …â°R tw1ß-+ W–\Õ'7äœ1!Ï)1Â!Àƒ)AH9ÿý÷–®XÚêC”Æ|æ{LÂŽ¶®7ß[4õo]1CBw¸Ä¹vPÏißMW <ÂÛº!êö=¤ „Ìd"í¹®ä€ˆ$Ã5°"7ª7w&švvfR6’kD muÛT* Åa¥@ŽÍ2–0õ!e:%:¥®çsÎ1F!!€’]‰ÕëVwuwy®¡F˜™ ’¢$r O¥–­Y?µy|qnn *šj:·7ûà 
ÍoZPææ³>ÑÕÑÝÝÑ--ÏÀ”hšaèyáи‘2¾ëJiY>¶e{g’ X‚©öùV(ŠÃL<áxŒj:çà2Θ„L@P6æ aŒ)%c,7659Ž5¦OY¬_ÿH$Üd§çÃÊŠ2‰®Eë6v$Smmm…Ey¡ü²Hs»Öe T 6éÑCG¶ûm-m;8㙤íY¾2 QZQ^PXR¿­v}ÝŽ’œ\Sh„#Ëó˜ä  Õ£ …BqX)cB×MÄ÷‰…Ô RŒ0$‘BJ) F(†€t]RRóó(-šžÍ+È›w0QŠxÌ€°fD ‹œ>™OsÃÌ—ÂÎÉ+ÍÄ-+å&â©LÚò™È3´ ¡Q)ýTÆJ¤2#?è;I×õBá°SŽaP@Hõ¨B¡P^ Ä%%„ .F ¤ëz˜0H]’УIåD£¹‘(dº #¨µ¥Ã—¼ª0º³¥cGc‹cÛ…‘HN.D „rBáÓe#BA3/Ç,¤8èø¡”h< `JºÚ;[Üv+vm?ã:IÏö _cÌ<¦zT¡P(+!™àRJ„¹®GF&ÂK)B€z^nB!‚sXß~’h9ž±…öœŒkÙŒù‘ ‘S˜ ç DHìõi(Õ"AJŒ²üòò>UõMº$šNƒ9AÇc]‰d*e‡L­3•Ä;Ò¾'4Ì=¹»ÛvOõ¨B¡PV ¤kD"@@ X àB:¶K &†&@‚BJ‰˜Çó S?óèc·i¤ª¬lØøqcŠJYg«]__Þ·Ê$cº:[Zì$$Є LÇ e¸“ò„ÙÊ+¨*®ÚV\·ÓßÊË#MÏ15›³æ–8Ö)ãܶ]M£ó˜„Ï<ÁÛò§™?Ú •ßyá¹oõÿOvþ±–þøôkßs`ì]ïþå´ô&þY–óËÖn_îš*ŠÏYvv¶J Ã4u= gh:ÁÀ…t=_pŽ0vÛ0 HH) aãŽL47474R"*ä6[µ¼:‘ìì×'7Ôµ ÉÉE %HÃÐ"ašt¹@éVÓµÓ5"³5íyE}s=”ÙY×ÚauëÁBs–NxŽç]£šF4" nÚ#ºnD‚½°çºÞºîœ[–ùþ#¯þt´ùñïYÃãÏx `ÄÍs}ø{õç^ùJrÏûÛœ[Çö|Å;üîŠ_jOüÉ#wÏ`ªµ6…BñßT ‡žzSB5B06 Í4L„pYIÑ‘ÃÔ„}!x\d,Gæ‡ lYÙ&ê·™Èæžd©–µÉŽpß‚ð ªT2ã{ŒR¯®µiEýÖÖD눴ÕíîL$ß±|oO'º3ÑHP£Xpò¸ð8…ºNÀáL&¡„è=‘ç{ÁsÙ kÁÛµ7Žaìý5oýðÍÚì…GÎ:¾„&>ÏÆ/Ÿ~çÿ³%àPEúeIj/­^øûo+ùQ(_¬‘RJ)¡# €`†©íú<_ !AJ)°R )„d$dŸk6­¯kh ‡É€Ò’ª~EÁ lËtó-ê_b˜À¸‡°ÖÍY7ÃHÁ6€ŒàŒå;®åzLx¾44Ÿc4¡¦”†A Bžë¹¶‡’D8¶½Q @ÑØ¬ãB æeÚß{»î†Ãô½há[@;fÖ¤B Ÿ§á`ßQÇôý/&%¸@¤W"¾ø¾«¾÷Bƒ’…BñÅ*ÐÑGÓ)¡” A@4ÒmÛ9¦†S–kèf<•!…d\/žÉ‚ç,¿¨ ˜—·i£cmÝî!¥šmâ¤% ½ 8O7q[*OvÕÇ[,ß "O¤\l;¾ïrÎ|\1ô¼¼o¹™¸QS§k¾/¤ž`¶Ï]Ž$ö\¹>"½š#QdÌÌ“sæÍéh?¯þšaCvKèXúúF0'ÌŸ‡Áß÷ôœÚôÚßþòü¼å›Z- (Øgä¤)—_sù‰{MÒ<¾ê¹ûxòݵmŽö;vÊ7®ÿzÁ'¢Å?½¤á¬½kê7ÿÑ`œðÇ7o‹¼öû»·¡ÃÓŠFýíŸþpú£kÉ#¿½ÿ¹[Üè›~ý­7œ^®ï'©5wM½â]ƉœsMãï~tÿ» îЛç>1³ô ‡B‰Ä²?\}óõžX;ç‘¿¿øÎŠMm€^8tÒÔ+¯¿â¤ÊËÔÁï’vÂgþöÌ‹×Öu¹`3yÊ%W^8±Ï'÷œEüÖ¹ë/|X—½hä©—ÞøÃ¯ÉÁ½ÉŽ7?wÙÔ{6>êö7:»`×]2>ﺳ~º”Àð›^}ô‚2õŽ™BñÅ*P¿²B]£šFuJLjkºnûŒùv[:U’«5vljFúP°=¿ÃN³°ä’R­ ?júºÚ­ %¹ßr°¡ÒB«5ÙÖ™ÞÖÞÔÜÒ¾=ÞžJ§Ó>Ú©·{$Ýie2.3µ`H‹’€È/tñn[ó|$A › ǾÀ‚ ×ñBzù”9ãôÂ9/t4¿;ÇÕCöLn¢sÙÜõš<3Ýwj¢{é=—]ûBÓsݼ¿üøƒêŸ<óÀ¬ -+QÝÿÍ«Ÿiì¹#Y¿èéÛVTŸ<Ì”ÑCYŸ Û±áéïÿõ/«³ è·¯}å7×òü_çÿñºÇ·g/uw~ôìÏ®!eÏýàˆÀ>“2‚ÙZ¹ík½ùáwzí|ë^ñ§ï^÷T­Ü¿ü°æ¹?»ü¶ùñ=ZÚ±ù½GnZ¸ò†Ç¼l±Ÿ„{q—Wûĵ—<°f/Ýw:¶-}ùKß\øã§¸°êc"„­å¿ûæÝ¯µõ$Ö¾þõû¾½?ÿ×Kûi½È®ä¸ó†ßs÷F±î­µ©³NÌÉÖQ¦Ö¾Q ê?búñ¥J~ŠÏ“Þ‘*’ÀHb DHÄ´ ã„;w(§-Ìö-)ŒM! 
\ Ž*ë3ª¼ÌwÜmíÝ d •Ù²põƒœû¯Wªå¿,"­bÓú–õu­©ŒíuYÝ ñL—CÌPÊÔš;í]iަÄs}$ =ᤙ“aÌã5`ÄEo«mvvö,¡Æ·ßkôw{žVÌY+¢'ÎÙ·y5Ý™•Ÿà„›ŸyoÉÂ9¿ŸV à,ÿóìU™¬bÔ?÷ÛùÉ9þƇ_}ç­üÞø¦ù-¾{)kýì§œ îyö•gï¾ *ûI|îÍß{–\p×Ó¯<ïŲŸ5¿üôZkI‘žÎ]ÿø³ýϸêæÛ~qËå£#ižZõ×\óx8€óïœóË;æÇ¾ì¯-\¶ø¿1xÕúõËûy'«7w¥W<ô×5>sÛã¯Í_°àÝW¹åŒBpVš³FäŸzþÈý„Õ‰t'>⤓N:éä)ߺnÊà5úL¼hj9¤Ö®höÀßñîëu »é–¯U–[Ðoü%¿¸åxóz¦ô‚_^{Ò ¾ƒNúÎ “{ýYþÌÛn8mHß'|û{'eË—©ÙÐ~Ð×pÑä_=tÇU3§L~úÐÈÚø·ë¾û÷ÍY!zÕ§ïkíÇ«{å±U êO¾5¡ÄÀZÑø+ožQbÝs¯7ìÓsÙ›»xrG£€ËÇ;¬$ æ”>ï¦û~{ûoîûóÿ]?æ]ÂÝ£o}øÎoO;sê7~ùÿ~0´G§×®ïä½ÊŽ”LšvD¶WÌÛj÷´À–·–eÈ‘3&*R(¾ „°$0’$B’" !ŒA‚k{­‰­›ë·olp¶kûí‰Æ–KÂÆu÷Ü?ûïÿxcCS˜‘~ýV–÷Ë+èÛæé¯~´õoo~БLX®øpsÍÖäΪŠâ³ÆŽPšSWÓ‘êö¸ŽçZ™4v¼®¶—@(55-@¨›¶‰áH œÔ4"<Î]Î\&AêÞGi•gNuo~°“€LVÏÍú_ŠÏœ64°¿ËŸxÝoî¹çž{î¾í²¡YE¡¡ÂpöK»Û`ׯêñ¿õ?qt^Oãܱg ïýZ~Þ¸ã*ud–+îy8òø~Ù,£zŽƒµâÖÁ,?ã˜ó-èUG{[–ÖîÖ³Í÷ßòÄçӜܴ´§výŽí¿«™ý'& ×%öQ ^ÝE¢A±ê—\þãß=üü¼µ‰ÀðSÏ>ýøq£äëo¾àÄ‹Ž+ÊÖ‹=©¬§A:3¼wÙáâIÓF#€øâ÷¶{ζ·uhGŸl¾ …âs¦WS6—\D AIÈî‡$—I¡IiyžÈÌE$¤›àÉùﯨ^²báÒ%ÂMm̯ز½0/ÇqìdÆêJYÍñDK".…$ˆ4v$ÕlÍòœÜ£úõÛ¹^£ºÐ#y²«9n3Á„ Ö áRRJ…Æ}¾àŒqî1ªk$@µÆ]~5ï{êùø{#ÀÖ×¶^ôµ²Ìš¹yeSÎÝïZH§á_?ôÄÜÅëšSûœüy¦£gƹ¥‘ÝÓ2 KBéÞ/RéYƒÀZÏ‚„‹#=ý…õ`Ï›˜ÙWHÁÀ"£÷Í2dæg·=tÿ‚$ˆ þà¾aOÝ}Ês׫»ÂG_{Ä%¿]œHn|ï…ï½ê?ñ¼K¯ùö”a‘KBþ€âÝëB$˜Úå1–½ÍÎ(š8m4Z³ZB낯6@Ö¿ûa'€>îüqyJ€Š/…I#I$BR $IP= ‘nŽÜ j&E8ÞØX½~Á¢%[6o°¼Œä¬3߸}‡nP„Áó}×ó…„`‚1Ä…ÜXß Úä¡ý+r¢E9¸ï#D"Ñ !²¥¡ÝM{ QdB0p@i:µ3N(¦Ý0u¤aÀÝCÙŽ”ž8kìïo¯°åõÅm³Î¨›»Âè7í¬~û?êÛo|鯋ïZæè¥CGTæ©Û´|åö½|Oû“!Dï‹· iôïí¼J ­—÷ѱ?~æÁ úkéA­_¿ê™&€Ö—nºuø³÷Mï«í]нÚÒO$MtŸIøtHH¯î½ræý¯ ó™çæÎÿpy]2ûe¦nÑÓ·/Z\ÿÈ£×ÞÛ‰ˆé^mô‰¦é]v¸ðØicðêj ó—µ}³ÜyÿýóØóÎQ±ç Å—D€1$äHP" Cƒ” ¹š ¦©ab ."aÜØÒ¼àƒe/½òzýŽ:à A€‘ÈÜql‰"kˆHR‚©Q’¶½õ;šu] -ˆG‚- ­©.˳X0BMCg6Óº¦Q,}Á]?`Žå"‚ƒ9%rl—¥™oÒΤ¸pò¬£µêe>lzkUSå’Àé§Uì§gãc³³ò>ùî—~{r>àO_2í¾­{Ò D0€‰æ$‡pÖ˜éæ&ëË=ÂUÃJ 9êºûnX{ñë8Ø‹ï¼ñ/ƒÿþÝ#‚=32‰–å4wï;>!²oÞ'å¾WwÜ‘ç|÷ös¾ <ݼeÍÊÅï¼øØ«ë2uO=´øÒOÍëÝÓE/³Ã…fŒÅÕ+lyû£Öñ‰ww'Í<2¢H¡øüé•§ÁÖw¤´í)­!¥7$h}'ÙÖŽk:ÐæT ÁŽìÈäÖ“òΓ(D;ÿùÖË<óTmSM(D ’’3ΙBbL)¥TØB¢ç1`!$¥¸;í¬«mÝig0•oLìÜÖ‘ê´R–¡i…‘@DÓÜ´ÛÕ•ÔL$4uC#:ÑLŠ@b!$¢‡æ8Énbýœ×æ.OŒ˜qr=€®¥¶+ûgù±#³Þ)ÑY½ ~·K P rLÏ*ͶùuöØ=¢}Ñœ_™qa ¼äîÛNÌNÜÛ¹ñ®íªàÈÐñÙÕ.©³w5Œ×¶y͆ší;;ÓÞ¾LÀÞÜ%½Žš• ^þÉ—ª€„ˆOœrÅ/¸}¢À;v¦{ëeíu!qþøéG€ ¯¿óvv7Œðq3F+R(¾4 
ÔV¼¦µhMs᪦¼͹µæ®j˯î(^ßU²¾=o}³±¾ÑX×\ûÄ;O¿»h~:ÝëLp!¥@Ù-MA $³þ))‘” ô,`H@DÒ²×Ö·IŒ|ÆÝ”ßY¯ùh{CMkkC<Ýî´lo¯kw$¹'\ۋ䪖–ôÉc¾ŸN[>HA é‡Vûìöà-ÿûëíгÏÞAz^IÏzCÝ;nw„ײô¯?¿{Eíß¼¹‹èU§ž–]ç+ïùÅ_lÙQ·æ­?Ý|Oõn'œüòæJŠÏøÅÝd«ÑõÆ-7?[×c×èýνx4èxáw¿¼¦9ß¹~îo¿uÉ—]tþ¹ßz¬nŸ!罸K¦>ºÿÚq÷ý¿¹é7/®lŒ[ŽîÚ±òõW×ù¡AƒóziÒëBâ¼ñ3bØŠ?ÿe @ô„é£ÂjfP(¾4 Ä A è2¨£€†5Š0– ¥D øH$kGýÎT¼$cÜÒ—’#$’=+HBH)$‰„)H @RJ@2Κ;º}!ÌåŒ'SŽÀ¤Ï’Ü’œŽDªµ#‘N;™´ÓÝ™. Šò¦I¥R -HÍ|³ ª t`ñ!V?»=€ÐŽž5éÀ!¸‘œSî²»fM7qʵoû£_Ÿ—~7õØY­Eƒ.ùÑ9`­xøÆ‹§Ïºâ–ÇkÇÌ×£^œ‰¯Àyâ8zÌ ÷^;x«î»ñO+³‘¤ì¼Ûnžhœóë+¦žvÚ¹—ÿêÕf #®ºó›Cööpð»pÁI?¼v¬]óï¾jÚiÇO>îÄÓ§_õû÷“=þ‡×}ÂÐëBâÜc¦Å²¯°@ÞÉÓGÕÄ P|y(k¼ìé î0æ2î2n3f1æržñ<ÛvcB0Î})™D\‚²"´;%¤DÈ~‚„Çõ33 !ä:œs† )Á‚KI5  ™´_UaEiž‰ˆ—ñMC …Í‚¢pøPm èÙ¡çoóØ …‚c¿?ûw—N¨ÌN†Ñþ“.ûí“÷\xÚÕ?™1$ `T .aœü-ÞwÅñý£P¤rü×nÿû]3Èɦâ[_‘ƒŒÌÁ—ÝýË㳺¹ãéþêÍV WÎøýóÿxÆøùz׫â¨s¯ÿÃKýöˆà~ýW¿KpÉŸ_üà çX´+-uò¥?äÅ»§–ÚŽ«½-$Î=fzl÷Ê_Á)S‡Ô¼ Püwè9VîÀV"‰0Ƙ É9Æ’€ ˜d¨Óh ×Ðt$9‚ç@$FX",!ÊöÈ”2»pß#F ¥€dʲCAbìz¾“peQ¨O¿ü´íø¦Ö§,oø>©x†9ÌÀ„R‰ù!áf#·ËvWõ¨B¡PV ä0_Œ@5jB¦ºÆ„dŒùŒYœ3ÉašW2ƒš¤BpÎ !„Œ €³Þ·l¶”õü!{ì0)Ÿ ÇL @R2Ž ™º1ŒV˜âkÙÙU Ö¯oêLY-Éh8˜uÄ3Ž­tÕ£ …BqX)P*㚺†¡XpæZžÍ8 !„@"ÊdIEn  À hBpÉ% °Ä»‚²î¸¬${þÜý'@®ë3Æ$—Ì—-©÷ç­o³R£GVFÃÁ ÷»=gذ¾.çÔ AKsœ¢‚²ˆkÑD<­zT¡P(+ò|N0q}RX 9cÜã`I0ÊžD°fùzA(T¢:áR`‚8ßcBH]‰ H²q×»£Pv)+=B ¤D2»× à3™_–&a‡H4`ì wÆÓå}órò#AÆöS’eÚâÁ¼Õfú …Bqx)Ï%r}àÒ%˜ á8.c  SŠ5Œ "Xra3“òFPw.¥˜`€3Æ@JJÆYƒ)«6ì‘”]$’h—Ô£NH÷¥nC‡(ˆ†eÆ5z ¤s‚ºédÆÑ"zwÚÅ6÷!ãXŽêQ…B¡8¼ˆ îq3Œ1—àzL"I%#RbA0Å Ïã†ArJ#¡<Óê°%!ªLAéz>%D"Œ Á$d5è;¨ Ù-ÅBcÀIéN'·DÝ´ßZ×¥A0“°8¡Jv}HûÜåÑÿ H²LGÓŽæ¶ÎDÊö`=”SP\VQYÝs²kûðÙ—·”œséi}{»Öôoܲ7¼kùó/®Î=ó²³*Õ{¶ …âW„Ç}!#D°Ä0³RH„!ë0CˆjÄ0 æ2ÔŠ†—Dû6dZB 0Ä2<ÔGOì¿yqƒc¹RJRb´ë´@²ûö „0–BHR"H"›DÓ=H¶t×Ë´'=Çe`PA8>aȤš%qhµNÛÖ5뽜¾••#æ„L ßÍtw´4lYÖÐ4ð¨±ýr¨Z[R(Š/Nç\r@c)9CvÆ\J S3L ‚qŸs3 å÷Ëë{dù¦×¶`‚5‚1—Pù€‚@®é9~v+l!Fec ,1Â=Ë?ãžHRf 3D˜èrmlÇö¤çE¾/ P$à ‘IʤÒÙ¹nÙ&«ôˆ‰ƒŠÌ=ÞHBu3”W\Ö§vÕÊUëF—Jƒ …â‹R î1Á…)B4B¥€bêã‚sÉ=Œ©NûQ²õýér¬cßç;ë»ê:{NÊîã#¥c²+"„ä²çýÔ½bæ¸)%L1ÆXJ$,‡#Šõœ„4ßñœ.+Ò0gÁÑ{óÇÚ±vS²hôøÁšÈìܼ¹¶¹ËæÈÈ«:8¸}ù¶ðÑG î\¼­!]<8‚?usÝû/Í«Œ;ïì±…ÄjZ³dÙúúö (0fâÄQ¥&út†û¹L:;×,Z²®¾=ÃH°°rÄ1“ÆVd¨Aˆumü×¢å[:ÀÁÒáOž0 Œ÷—šžZûÏgóc.˜ydnO‘ezã+O`9ó‚còU˜†B¡ø *P:‘놦ëšÄ’kº¾Ë»ý cÌÐ ŠF 
™:ä”EÊ컳º•;ÒõøÎ‰ÆáB3¹3#¹$–Ùmâ¤Ù£:RJÆ@#,³‡>ˆ ÙÝiï”pD€ˆ¦‘–a##|á¸Ø÷3ñ$H$Þžâ ~×¶:»häØMfª—Õ8ÅCÇN*ÉLK͆ê&åT©¨ÌÙZ×aŒ„ö– 鵬xý­æ˜)gŽ)ÔXû²9¯U;åGŸrþà’ª_ñÞ¢¹oÊ™çŽÉýX~¬}ž/Ëñëßcikù„Ó¯Œ¿»iíÂ…o½m^8u(o_±ˆ5+*»6øÞš÷ôésÖ€ßOj£ú/^üaM]rL‰Læ6È=fp®’…Bñ¥£wg¤úܳ]'íXIËJf„ÏÀ^Òr»Ó™x2Ùžä6ómW8LzÂ÷™å8 D¿‰Uá’àŒÛ §½.ÑÿÈJ-ª÷¬Öôì‰-…LpÎ9—!DzL¤žÓ„ã»#û•?iܸÃRwë¤Ûr»éDw:•Èø7`˜”c7é²to~ó㸤2_“n˦­‰ð#G”çt=˜W1¤Bg,ˆP@4Ö|ËÛ;\O²øú·_¯ö†ž~ö¸>§ñ£µÝú “O9ªª Î-;âøkm««Ûüå·ÿË„Õg¤hÈò‚œhnaÅÈãÏ>ere`— U:á„1UEyyÅŠõż«9Å÷ŸU /E‰šºdvALdê7µCáˆQ¬†ºB¡øj*¤!Â]æ¦áxÌöÒ]©T€/ì„c§Ïò{Wknu¥ §0ˆ¥×Ù‡¼Ê’݇fJ)$y¹Êž™‡È^M$2µ ^[ÔY~⹓«‚X²±ƒ£ÂþÅ»ÝzAUrZw¦÷Š8Àe(RÕ/Êw¼ûê¼%k·5'ABE¥…ážø”W™¿+ ˜Ï”š VŒèƒwIH×oý"J€ ÅWÕ Ç=Ž)A°,|î;Â@ÒÑ qÆ=Î!©$ K0 ÷ é[5'á.âÍI¤’ B0Àî³QQ60¡g+RÀ€²ç6dÕ„£ÆŽ=þøãi&ÓZÛ :1ƒÒãÂw#Ž™4nH>@ïU%Ôã¹<@j(·|D_òNM]rÌ‘ÑTýæN\vreH…R(Н¬eÒ6Õ)Õ(ÂXrÀ™†¤:Õ Š5"÷\&9÷$ÄÐ4Ì™t™* †ŠƒV‡Å™°“^ÓšVî1ÀHH$”µ3@"™"ÔsŠPö pŠ‘f’иQ“Nšxl|ëòÚNß0Á€™N¤Àf`3–ñ¸ËBºŽšºK—½¯yöR„H.v+S¼¾¶[š@:m™`iÞ^¡pÁ'ž1´ã×—Îû¨ÏôqÅ Ð7uréÞïü l„)tíùÿ@—ECÆ2d0«£©¦zÑÒÍå‘‹&í7ªâ€©!Z6¢œ¾USŸ<¢_ýæ.Z~ZyP B¡øêzá¸Ç<Ëu,×Ê8™”í;¾A €À!L¤$0‘"dh¦NtuM@L„‹B…òƒ…!Æ80— ‰‘µ§ {iÀ(‘8»5)¡ZPE †–UŒß^ko|wÉöÚº.×M'+åfv&ó@/ E‚T¾:x¸¼—‘؈è~2F–¯9-;Úm.ütëÖ55nމçÌé¬]»Íï3¸l¯ù›– ë_T>î´X~¢zÞâFG–‘JâHÞ.r‚áàǤý—±dsmm«-h°°jÌäñ}Àik·÷ÿjÓA25ûŒ¨Ô[ëwÖÖĵÊe¦å …â+l™jTHp\Ïs}) =21ÆÒãK@nNPxœs³=æº iS’[•—Ü™N6§&3/œO0ÎXqdz@2ˆ¢ëšaêQ]/"FÑÆºñòÚõÛ›<=j¥H·ë¥w¶¥ŠŒ ´c[–K$PÀ©”)ÖÌ^n>€CÅ…´¡¡Õ.©Èè{Á©û5*÷åêw>7¦2‡z‰†5.ÚªŸ5mLxïÜßeç ÏlùðÚœÑÇR¦Üjß¼ºB#Jƒx60÷Ÿi.ÐK†W¯­]nÙfÿI} 5È ÅWY¢aÓv™`‚"¬¥Ø ò¥œ1¤t¤÷}) FÃ<•I¦m×r)¦Ñ²H´o8QŸÆ (*­,%q~K²ÛÇŒ ¹ „„uHJ¡E8`Æ|ßMúà1ÂvØ.ö“¡¿0´Ï/ȉ'ÒNÆÑ‘€áûœŠ9 ^GcãHÕ ¼%›ÖmÏ9ª_^ÿ±“úïõ]ÅØã*@0”îGHîÈ“ozá_ÿš¿±èìGŸ;ÅX¼¬úÍr.pì”cGåb`{ߢïû2‚r'{¢¾¨zÑÜ5®ÀüòQ§œ+ÞÛ‰÷)ö—ZOyõâáýÍšMNpÄðbu^…B¡øÒ‚>±7è>9ç¦S2–'éš0´ iHŽã"„1ÁIB11uî1ás¢Q- ÛœÙŽÏ§ ‹unéj[Ò|â˜a‘AVd§M,I0B\ ÏKa„1` ±ŒäIFCL"Æ\Îm$™aÒ™$¥ñ¼\7¿À !SpÙÙšOuÝÐ(÷xGg†sŽ\¹¾®—5—~|Ûªêí~~¿U}Šr‚€ð­T¢«½¥¹©K:al©®–P …â µ„|Ïǘ!XHÄ…”Iè9O›¤gûDȵ]/mKŒ°®!‰—zH/éŸ_’à§±­5R“NvÙ6BDb$pàB2àŽïJI4Œ# ÓÔ.öÒ„ð$“A ménN$ѰésÎ8÷ç.sAëŒ1€ô}îx>0%!ÙSN1æ™eG”u$ùʺæfKHM“ž'XHéûž%˜…‘cR.Â'X×̰F¦Â7<抴\‰îxJHnu扌ń+4‚$â”hC† ;´Úc#¯bX^Å0Ü÷™@DÓ(Vº£P(_:}ü5Û7a‰(` 
HJé3!¹Ç8gR_"„¹ËA&$ÁµÜ`Àð¥p}Ï–Âèݕ՞Ôõ\åjÄÔ4ÓÅ6sR®Ûá¹q‚mKBˆ†t0 3œSF=7ãIA˜1ifZS™P$ˆ¤ô‘p¨Œ$ÁfØ´ÿŒ³§ü›Í€‰f¨}k …â¿J¯¢±Ï8upÅPŒ0“ÂòÜŒc»Ž‹0Öƒ¦fjÓ0Æã\ )¹`®ÏsÔ¦É …B¡P ¤P( ¥@ …B¡P(R( …R …B¡P(”) …B)B¡P(J …B¡H¡P( ¥@ …B¡P ¤P( …R …B¡P(R( …R …B¡P(”) …B)B¡P(J …B¡H¡P( ¥@ …B¡P ¤P( …R …B¡P(R( …B)B¡P(”) …⪚àãðö÷î»í¯‹¶lÛgÃöúc3Š•L+ Å®@ª[ðÂ3/¿»dMMsÒÐ"Ee†1ù«/›óMÕÒZÿðu×þ­~ðUúÕ#‚è¿Ù ¤è¤?x’hýÇeçܵéóÎÌOm»uUçÄØÑSC@¤_\¾rUIìWýB‡á£ÁÙ­Š¯Κ;ϽâŸñ}}U0ëÑW~r„©ÚèL¼†W~þí;æwæ5ëÊŸ^7¶_¡Î“-[V¾÷곯ÌÛ°sÀÌ™Ÿ™yµ¯ýsMZÀª—Þ¬¿tÄã°mýL&™À¡FO»I–Ùæâ¾Qãð´LÿgºU¡P|Ö ÄžÿÙó;͉·=ýÿ¦”žO ;yÊ9#¿1ýÉϲPÆ i—LXð÷탾>uÀáFÄ)èKÞwŸ¹±¦ù¯l GÍ<¡ä$Îëó™Ni`¹ôs/=ÜLjôêÓ,GzüQ²;•Îhá*ísòOñ›VàDnúÂ’ýßèÖCÌñ°i|)—½º—[•èsÙV¤…"ÑhP’¯`ßõú1£· ÔÝ”n¥=ñÉïôA—Þ}ûÔ2Ôp¨S·½Å†’ÜÀ.õæ­ r‹?'Èk½wC÷W&ÙÿX|ÝËw_uîÌ»V¤å—±|™ÕO¾Ð ºé+ÉW§ïz+$\ˆËê??´püÍÇ|l–¤ù‡|Ê´¶Í{döSo,ÙÐjECÇŸyÉ5ß>kPðÖ×oýîoÞnp`ð_}â|ºæõgŸ{éõ…Ç=øâwÓºéž7W7&%À›æ•ÃŽ¿êÖë'åïK“íµ\óÃ'×veÅ×(|ìEwÜuÙ ]t/½ÿ'¿kM]§´xÔôŸÝûãÉùuÑP»üÎFwÏ›—|góÞl¾iÁfˆ þA裱ÿjv oŸ\¹¡fëÛíÉ.PÑ9C¼kl¿AØ’¥sÏ[ÓÖ–Áhptù¨'O3ŠÈÎÆ%³m[œ°À}‹^Q⿲¥~ ç>ú0@Á¤úé#«0€ô×m[õ˵[ßnO§ÁÂÓúulð(mTÖm¯þéªÍsÛÒSVåž{Ô ¿­$+ªß¿juݾ“ýDµ»þ~ŸÝzh‘Å­yêÎÿ÷æššÚÆ¸ 8R>椯]ÿ½ Fç`à;_¾þÂ_/³*¿óôC“·Ï}ñÕ·,ÛÚÅõ¢‘§^ö£]8ê¾öÊkó,]Û˜–Zñ‘3¾Û÷NïûqCþÀƒMÄçÿhÆM R¿ÿüÚ:篾ýÁòm]œŽšrÍ­?ï›×]~b…ùùE¡hnyé–1æA¦¦Þ °^ŽÛÏnüÒoä?ê»/ÂΕ²wÏ_Þ–/¸øïáQ3®ùÎ¥SŽ©î¯Ð"µòOߺúñm9“®ùÅ5g %V>}Ç/ŸßŠF^÷ÔÃß”]°–ýäÌkÞµ¢Çœ1´þ­åÉüâ@Wœ÷·9·Ž €èzýÊÓ±öãS•½å‰ï]ñÀJ6dæOtÁøò€Õ¸âå?Ýýô»à´;»ã¬R ÀvõªWP0ó‘Wn½—Z´Ï½zê=y÷ÌùÝäèÁY2“êø(å 7ÝøR LP^‘mFžz»®÷pjS#o\AÀïª>öÅeëú”˜VšWi+Õò̦†­‚eWœuÄpÞù~ÿ9›; øÝó¾öç’½)¤õØœ§oœ^Z9ɤ:ÖœùÖ†&(ù¿'ž¬&~aJoÁ²WO\ÝUPyÌߎx¤î,X»àòõ]¨xÜÊ)cGQ­uóGÎÛ&*Æ=9~Ð(ê5t6ÜóáòšQTØûKv?ÃâSÝzÐŽØ'©÷®:ýWú7~vÅ©£úñeùÑO^h coþçC3{,qÑõþÍÓ~8ß=ä•ö--ñÖõ+k“@Ëäv4²¢ªÊ²Â ß¼¦z»CnøÇc—UíRÝÞ 6ÞþöÎûÙ"¢e•%…e}Kól窭j¥?öâ#{†‰Sóø5—?°F^öëŸ_rl¹–ØøÚý7ß;¿ f>òòÍ£x`ùÏ*òïZžiknÙôäõ7þ£ þéã·dä—•„I¯ZFÚ[ÿÆ×þ¸  pÌi'Ží_šc8Í+ßúçÂÌ£øÄõ?¨ÛÞYsçy7$nýžñ‡î…;ÀÜÒ›©éàì`ãö³?‡úùûî¿ï‡î-"¹ê³b{qòÅ?ú/.ݑ៺Ô^wï9±Xì¬ßUgv}äm{xz,›ðÝ×Úz®Ï,ýÉq±XlÂ7gPÛíKÉŸùæywT[RJÉ;_ûF,‹]ô\3ëIÀÝü3b±Ø¤ëßhÛ“¡H­¸ëÌX,6á;¯´d/ôŸ‹Åbß|£Kì)µò—'Åb±Óî\mg/zòÂX,vñóÍ»“b;ÿqõ7Ûî÷üË[^¼$‹Å¾þÖÝ—8ëï=+‹ÅNùÕG™½*Ë[_þæø“o]––‡oª_vÕ⚺]éûéº[Þ_ò\rOíxfóøÙ³aö 
÷ïõ¡ÕY=îáÙ0û/“Ö'{Jë·}ÿÉÙ0{vῚ“{eÀÒ'üå±K›Üžâw.0{6<üÚwÏ5™ÖE•³gÓ~èíεëWÏΆÙ=uK†I)y÷½/̆Ùÿx0µ»=ùö s&T'¼ý'»¿*²[Þû$ùþ÷.ºo½³;Ùö9߈Åb±KöJ¦§ÇO¾m¥µ{°dÖ=8#‹ÅŽûñÂ=íĻ޿å¤X,»àÉ»3íå`Û5†ÏøÝ:g¯\þ05‹ÅÎÜý!ßùÏ+‰Åb_{¢aw">ÿûc±ØI·.ϰÍþ³Šü'ƒÖ­ypj,;æš©½>ì]ËðŽ9—Çb±Ø×žnb{ʲõ±ËމÅbã®xq¯O÷‡½ú×§ÅN˜uÑ´ÓŽ‹Åb±qÇ~þ•7Ýûü²WäÞýÏ-½šš:À6n?ËñóoýFþ“¾û/Ó{Ó EÆ^÷÷§oqD´çƒîÍï=y×5ÓN:ÿÇ~ÔÅ÷~YÿÜÜ€¢Ï¶ûñE+;ö˜|oåkë>îôŽ›tLÿ( eçÞrÓ9}÷óld­{úùí _›\´§Ì(<úÂYUÞò'^oô{]¿s[;H±Çþ#%çÞsï̾z07­ó_\“Þó0ÙöÁKrN™yDè¥_t¤dF÷íN:F•y ä¸gˆ ®¯­=NÐ+‡å@G݆ÅÞžG‹¦í›V˜ý¿S|€§MÙú- eýµÛç†#§÷ ð÷kÚº@Xë,|O[ኡ§¿2"¢ýç?ÿ^G@äøûŸþÁžhnÈÏÕ Ù’bŸµ{ðQpÈÙçT€Õ‘ð÷T&ïè)G™oˆ³Ï`°¡à SO(€xcOz2µáí5 è¨£Šéž_ÓÐq•\·ª¥W÷ߪÈg>hÿ£Ÿ¡1pæ÷¦äðÕϽÓĺ>PzÂ7®ºøÜ o¸óÏO<ûìSÿó]ןSÕùÞÓ¿ûîÔoüqE·èEi?=·ôrj:Ø;ȸýLÇÏ¿ùùœ§Ðÿ~$BöâÐó~öèÛóžºûúã+{lz¾ã½?]ýõÛßëØ5&ü–•ë’P8¨p¯IŠFûäkkHüÿöÎ<.ªzÿÿ¯Ù÷†}‘Md‡\P ”ÔT¾îkÙfÛíjj‹Þʼfni©™7[ +KÍԬ̗ëE1\R6–Avfcö9óûcÌ_~žÿÁÃÎ9Ÿ÷çýú¼ßŸ÷y¥‹g/èŸ0ÈíÖÛðFyf® €Ïý:Öa²ÿüãSÇ  ‹–<{Éæ=Ÿ=æ KÑWo|Ó«êßr»®©³uo·ŽµGÍǺпHZÌ%,ùÉ;§Ü»áé‘€ÚŸ×m?¯¶=S“¬ ò7¤ ‹k#~懥`6˜o£ðÇ\_\BQ§Ïtòu€ú²³Ýwà—òl’Ðæî\”’ÓwìœX: Ë8t^A€ùFÚRïÉSúÛÿ~ e1©ŒF¥AS¬‡˜Gk6•F£Ò¨—©ô`ó&£ÒhT™»s¦¾“«Vªo¹h–(øEO`þ-O^o“¦,µQøT¨KwNÆ¢+Ò@Nú>æÎ´–©áÙŠ– @?18Ð ÐÜÈ¿owįg·”569ªtë6ÂX“½ÓÒǧŽ9|h\\\?0üÃ6IDAT\Ò’t£ç„c t®˜ z•¾Ý­Yu Ü…}»ì£uÔ“a»ù; ê+•XÍZµªêf#ÕÄÌ éÄ+Í·sÛéšz2°îíöÞpV}lÕw½îVu¿ðýø;ž}nw ”§,Ô ã0jöÊW›’œ:msÒXbÛHßP& ˜lFç29,0iMöûE†ÇÃöpvmݱïL™¾<ãà¶ŒƒÛ|ƾ¾eÕÌÝ6‹¡»'Îƾ’iÌ:x®qÌ$7ªêÔeþÓ7Øÿ:UYž{³Nšûš´ýQéº,)ˆBßä%êjÒ™l[éB›NÑ“£}95•zyÞ1]Ðc|kiiasÄçîJ”†€A#¦ àtºq:ƒëK@÷ IÎa^|%û# saEÞ²Š¼e¢àNzArç/.ÝÆ@Xª¾6wuz³OÊÊ÷·<åÁ££9ãåq!Mï+²ÚKW,‘>-u5šëç+¸ êÅêÛ)î£uØ“¡1¹,0hÔéK“_»Øé7¼ß{xiX——Fwê,B¡ZWZPoJööÑÙãšì1°níöqV}kÕe ÔñóâA ^'Ð\]§·ÝÛ–Ó¡q\}|;ãã!ºÑ£Û 0-’Õ¤7[Àî?¤qüÆ,ÜzèÇ÷m]6o„7€ª“o»ØÜýÐ]âçâ–œC¿×S¦Ê“?U†ÌÛ›Œ,ÝÃ;|qtÔ_ ü¿ˆèEÑÑ‹¢£…ûyýü#l?.éïÒM†Þj1lk—›¶Dóð‹NaTÕÇ2­…R*TÅD÷0¼tÛú›ÆàŠÅÁÿ Ø­ó›Ù?pèsžª™ùðÑ~Ô% ÉtŒËïí@‹¾ù8½Âño,ŸãÁë‹øÁÑÆÆœþÂhÿÉæïòU€ÒÉNlßšn€hÌKOEôy{ˆ;7Z‡=k‹p…·û.Íg€Q;îÒ×d§uc·÷гêcz—ÈpmóœüTw«èíÚÏ Ë·­‰[—h,mpØÎÓ­¿¨kÔRµf¥\ €æ,¹µ$Kš8oÙöƒû—D3€ú“?ê{0ñà¹ÉN€õÊ¡4Yéñ£5³F{õ*#Ëå9EK\¼A+ì*‰•Hb%’>ŒàF»»Ú~Œê¶5œ^«¬ÀEµ›4Ž÷â .€Œ¼²ëMÒÝÍn …=8oj:;Ô„îá°$aRÞ¬áÃ逶d—#cv{‚R—\©àÀë«9ápc£»[›º4–¥:óîãÉÇ öàŒ•é¢±ÿxßú }õþ±cÖQOÆÐ S`û»0!~è³ 
â¨-¢Tù'Niþìo(Rb/qï%Ô×ÔK»•ÝÞ3ÎꮸлQÚ² †[,kôÕÅ üãl;\,˜>€ÚŒ³rGIÛ'~ˆ3€ªË2]ÇÌnõ¥‚f!I‘bƒÅswK$cÙŸ¬¾94nÿÉDÐ)ô=fXùQ³'zÈßÿùçGcæŒr»Õœ¹Zm`òE.ô6QQ+h¼ »†æ‚ÊF@wØÁ`XÃ"C|Ô_]±°Æ3jR1:]/aWÈJººo‹"5·DÖî(_¶Ô€©Þ|Çû–·1”…ì¨:¸Æf7¦ê3»¿)Žø×§O;v*=3ëøÞÏ=èàfV}l´wþdôe×Í=rLhUxVUÌû“‹5×åIµ|†G»ôþìqMöX÷vëXû¹#gÕ§V}׳pæ†Ì£Ùu«E %ßšnkÈü‰ýZ¢ föCÎd_n:$Õ[4…™èÏíO¯½ùÜ­êËû¿“œ„'ÆÛ² ¡—€ôTnc—ãC©®ý”ú‹¬ Z4µjÞ1^=ûî€i“ýÈ~9©27¾m"K¾Y:qX\â¼ §ê,=¨¹^ª‡DÔÖ‡jPé¬\±»ëRJW¹¥X pæ ðéh0|×ðbÊ=%úÑQ~Òè,6«¾ÂØ6$ìø¨ 7ÊK/æ5joí Ù…¹_·ßú¤ŒrQ¼ÙÅií_Õôz èo_YÆUEë5™Uõj³cýµ}ÆfVuö{Ͼù[­K¸sSeB­ÕªkªªªnÔ*´–»5Ï»2Úî"'€RÔÜ EîèÉPçökÄã$¹÷p46Ÿ©¼v¡ªclN)Ͻ¿`zrfðí¨wÏ®É.ëÁnj?·å¬>v}G¯þfÑW§\Nš;{Râà^"º®ºà÷Ÿ¾øôH¡ÕgòÚµSÚºçЄC¯šrþ•#Õ9[æÍɘ>qh°+Û¬i¬•—W;Í{IìíäOØ!Ol|ùÒ3ïŸ{û¥wÍÿz4ÁŸÛ,;xûÆÃõpŸðÖÊñ­&-Œ:‚w.]—±ò‘'ú‹(­¢¦¬¸´\À¬×›­ €®ø“ekXKIŽôæêå9‡·~Rqò SíˆCÙ)3Bvm/†(qÎà¶eƒ¹æ÷ﯣ€âÃÛ¾5êÅЮgˆÕ¬•鞢¶Þæj‘ŹÜz b,nh*¡óÅ k“¢rǹôLÞöçÞ& ç'"$ë²Á\äݹ²€-ðÆA¡~ùɳ ýè**ÿAÙ ªz 8‰¼&‡\ëÀèö´ÿô­‡uù’=] D׋BõÕoÖ¯M=)Uƒ.ˆ;çÙç+—O^sÜ'ügÿËÆO^ÿ׋•à{…>0aÙ;Ï»ÿwý[»3®Êš(LI@ôÈ'W¿‘\ÿÙŠw½TT­¡ß ‰/½³|tK†ª{c³ª.l_¾îpNe3ˆübÇ<¿nÅþùm¯oþ9§\iÀó‰ôҦגÝiÙ±-ËVî"Õé4qû÷kþ¬ TÃé÷q#Ým·XêN¿¿|íþ« 0œ&ÍZôÒÜX1ÝžiH5}úáÕWÁðð“hªê´¶[hüÄù/<31Ôžõ‰QväÿäHe7ZÞ˜¢ ¼B=8iîã3ã}8´®¯¹Kßbk¢Ùa`Ö 3dìîÑnd?¯Æ^ïý¹“±»ÛX ½‡RžyuLÊæ›};Zÿ_siKÊ!óöV8¢ÁEkWžÃ©ÊÞ Ó—OûrïÒZ£{sýÙ÷æ}jwq{±èksRÄ2dä²Ìæ¿Îhû‚Ö®<ÿPc!ãOpHWÂÍETÓùCݦMðçBZ³Öĉ1ÚÛ‘NT³½ÛîÖ:y^:?üyWòM÷&Ù·¯¾¼¿aÌÒY_¥sÜÏ~,†è•jÓ_i´}‡Yg¶ 8¦Ð:­*ýòJÀÜ”€iXJSrjÇ+oœZþöT‡–ØZíÄ”jÏ¥êШÐP2ª÷U;t•b†=òçÔ—YuCi<Â}yÑö}†…(Ðc©:øâòã¶â:£ìû ŸÖ$ýc|Ç7*4gW.M½1jÃÞw§õcßµ £Êò~™!µåÉ-E髛ߒè^šal>0×W(;¾Q©_IA Ÿ73„ý—-ðWCœ•½9Œs;6§6 dIýâ° ã7‹¹d¸î9Ì ørϑӹ…U¶Òé| È¸¤I3çL|Àõw1ZÆâÝ/-ý4»¦åUž{Ȉ§ß]?ÛŸEì€@ˆ@ ÷ dˆ@ D@ˆ@ ¢@@ D Q @ "(@ þÿtå±Ãÿ²ÿ·¼¤:Geø/N›÷òôᾉLjéëg²4·~Õ¥9”uæÍ²fóÝøãœ1Ã&\˜9n6i/B ÷¡57«tA0§å²­æf©î+æ`„@ ˆõ)–:¥ÎÊsnk1oÔ)jÀ å÷UËy‹¦ ñë_މÂ}®@Vc™Æ, [{Ì[•jM3KÀ꣞ó–ü‚Ëézb$@È¢+ÒÁÓ™×ÚdÞR£ÐÑxÎ}k¶ä)»:H£ã!„;žogÐçï\öïo³Ê”V v݉]\h TÙÛ_ß|ìrIÌøüÈŠX®-L©JÿòÃ]?¦_«Ñƹyû‡'>ÿïÅ#%-ZGi¥'>ÿtﯙy5Z€ã6|Âü…ÏM áwåÑ)YÉùõ•†®Q4àeAÎÀ´*¸­åŸw½øxª‘Oàž6è!LæÌ¬£S¯ÔÖÚnšÉõ‹ÙóÐ1 kCeæì é9…Vº¯Gÿ§=M?•]iÿg]G–M °Èçìùù Iø¯éSÕ\ÿ¨@zDÞPMÑ<ÜBÞJù‚»¤[*åyk/¹ÑXmÀòw÷9hE¨»„ÀZWz<ôD¹$›gOLj,ü¤°ä§ªÆZŠîíº.)a 
“È@¸ÏÌ_?:ýƒ’6²¡»¸:å¹£ª6¢O­œý¯ãÖ„Ek—Lá5WžýjãGóí{ÒŸ €Rç~øì?¾’:\¸jáÃaEî¾µo(¦E½¸wçS!œ[þek³º>Gm¢ƒ¦òp5Fûõ³¹y‹úxi Ý;x¬€Îä¸ så™/Åʾ x{Oórñç0´êêo dÅø> &FG0@sãtÐO…õàÿsê¼<Û1Xµ»Ú·„7¾lœ£Y#«¿2áXžžÏÌÁ ²™&›ÁÛÙÙ# òDVmv¹¼ÈðÂNÍMÓ ™/_þ5!ë†É5òㄨqb–ZUµ+ûìû5f¯àä¬ä:˜µÒÉßü÷7 $"'?¾(XÈPêô²j™ÆdÍ1Œ”X„¿/õpfù¯;+öÚOŽô¦ðô Z®È\¡i ¦>[õ•Ôê1gË;O?Àà;ñå •™s>½¶sëS>œä~«œ M rO ªÊå‡Ù.úøÚÜwsÙÉRN¬—ÏhQËç˜\¾$¯I^ÚúŸoD^ý]VvUæs…i‘"& t|LP¸­Y{° n“§·¨-fÑÊ>­eOäå:âå'v íüí’ü™£g´½dTþ1òÛst¥©õ c|Xô —æfÝÐ2ûí8òQ[x'Ý2Q¨?pôã’Ó úy 2&‹+¢þ£Íl;›¦6ë.—h¤_7F^9"_ºdjÖ°R7ã*†çä÷¶Ìòe€öÚ·G«÷Ñ)áüÖã,Ÿø¡À˜ûóUMÁU¯ÖÓ¸âÖtŒzM8Ün'‰|/” P¿_+)±½GÄ”<î ¾4ïÜÍ:7«¼¼à7è^—1[ä?Ë€±´ÙlSÖ× _tJ»ì"íµ8ʰœº\,¥º<›Pí~žnb ÙyC×zKc:1PFòýé(½0ýRžMÚÜ‹R’g-Ù´ç´TÕ¶Ž75Éš Cʰ¸6âg~X fƒ¹ ‡KYL*£QiÐë!æÑšF¥Ñ¨4êe*=Ø<†É¨4UfªwÍá;¹°j¥­QKü¢' 0ÿ–'¯·…-š²ÔFáS¡.¼;”öªœìNrÆâŠ\@›¯#ñ @¸ßqì6Ããá {8»¶îØw¦L_žqp[ÆÁm>c_ß²jæ> F­Â^ùjS’S§*/Kìqën7TeyîÍZ8iîkÒöG¥ë²¤ }w—¨« ‚Îd€EݦS4Áäh_NM¥^žwLôßZZZXàqÀÙ•ÝV‹Æ,½ómÒ¶+ÑXH€C ˆ9ÇoÌ­cžk,:wüÈÁ½ûÏݨ:¹q±8ø»7ÓØ|6`ãêãëiwðE÷ð_ìlÖ4•|!gü_D`€M#ÌŠC•lÿˆ)bS èNL`:1Úæáªz#•% Mœ·lûÁýK¢@ýÉ õ­»Dh,m0õæ\\žS´ÄŸ’Á®’X‰$V"‰àÃn´»«íǨn[ÃéµÊz QÞN+½qdä•]o’înv[(tŒ ÓùÑ|P4;µL5Ô ˆä1ˆñ¢@v.ëY¶½v½©ëô‘±ì‡ÏNVß,( qûO~$€N¡§Àòˆ á¨Í8+7õö:ÍÕj“/r¡·‰ŠZAãqì¹~sAe£  » ìèc ‹ ñPuýÅÂϨI|Å% Ñxo.€ÒjEs‡”¬º^@0”Ó»SZT²¼‚*-Ù<"÷¡1„^nLÒS¹]ºAJuí§Ô_díäÅ¢©UðŽñb‚˜Ù9}¹éTß«}J/ÕC"jëÇC5¨tV®ØÝŽ@‚ÒUn)Öœ¹|:J ß5|€rO‰~t”ŸGÇ£4:‹ Àª¯èuQkhLd ­¼z´ùæg­ÆêíyJ€9a`HPo‚OªñÔŠ©3žxlʬÔdÿˆ@ ü]°;í$Œ:‚w.]—±ò‘'ú‹(­¢¦¬¸´\À¬×›[6¿F €åì—4aæ¼)qžì.“`FUÑŠKÊ丸–D¥Ú›}©Ð'n•?¿½„RÚ¢„=iY û‰¹JµVmÀðvõ›·r€«Ó­NoÑäÜ—^>¥$ÑKø§;®*Ëœqæj–L®ó¨€ðEžÊm‹W›€-N²÷Á®uçg)ü_“Ö€!ˆ úmRh0€¥²*Ý¥‚n4Ö´ö…›9è–¾phªÊœq¦ Melw¶aU朌ëi ½S”pd¡h÷Òþç"'ñõO7ÍèG¾!@ Üw tïÓª@î©ó§=#°kSÇjÍØ6`Òœmî¤ÿ @ ÜUèÇ›¢š);eÕZ'ÏKç‡?ïJä‡@ ˆ9û‹ª=—ªC£BCéÄ(P‡Gey¿ÌÚZâXŠ ÒW7¾"$@ Ü}þž¾·» ˆÒ;Ÿ±FçÅiºþN¾zÊø‡’Ii@ üüm*,\ú-%G^Ñò>,ÓGÒïÍq-têäY«Š:U^ p„> ã7‹¹Ä (@ îÈ<@ þþ×±,ŽJÚIEND®B`‚pytest-2.5.1/doc/en/img/theuni.png0000664000175000017500000007536412254002202016370 0ustar hpkhpk00000000000000‰PNG  IHDR,ºµÉsRGB®ÎébKGDÿÿÿ ½§“ pHYs  šœtIMEÝ : Œax) IDATxÚìÝwœUÝ?ðï©ÓnÝ–-ÙôP–’„&6Þ,  
>Šâó¨(EñAÁÆ#ŠXQšüéR$B-IHÝM6Ùz÷Ö)§ýþØ$$a‰%œ÷+$Ù»wf¾3÷|î9sfcÀ²,˲þ°-eY–eCȲ,˲!dY–eY6„,˲,B–eY–µ³Ð·|…”rÎS/^ðš•ËL¦} Z¥|·¡¾®4T¬”ËÆB!Q’$‰Òc†áÐÐb¨T*•KŒ1LHEZë0 •6®çe²Ùl.—J¥2™L&•ሪXT*•B¡Ð74Ð0ª)__ Ñ30H˜ÛÞ>VHƒ)IgÒ &ì6kÖa„P»-˲vÍ*rõ•ƒý=¾ËR!¥”ÃwÆd``@ ‰ñÆî”TÊh…ò}o8*•JµZÅßÇ„hc(¥"I8çR*%d­Zu8Ïf2žëºg@„Ƙx^ä …XJîyŒ±Z ©¦Œ¹HooßÂ… žaî9Ÿ>/“ÉÙiY–õŸo»ôóŸÿ´§g½ÑZ+帜q†0pÎÇUJ …$‰c´TR)©”"„8œ3J1­dX«rJêëó¹\6®Ëç³éL:•r#IV«µrYDí8ÌóÇe®ëd²i­d¡0X.—ãA*͘ã8çÆÜh¢5YÛµþ·¿ý•”ÒîH˲¬]­'ôÌ3s C®ãh™F)"€¦”è( ã8rÕZ )´Ö„P„1!!H’¸V­Öª•0¬å²™Æú:)µaVkµ(¬²jÇÀu8PRÈ$Çe 3NpŽÃ]ω*•Z­æ§s£šFeÒYƒ(Â!‚ì! Ö÷=óÌÜY³±ûÒ²,k— ¡…‹^r8# E¬QJI‡b‚°Ö:IÂ(9ç D"¥BÆ(Æ8I’âPq ¯¯XRJ¦|?“NÇq¢¤v\'rœ8ª®†¡1A>ŸR)XD!&!­µHDäzÜ•N”¨8I²Ù|&“•+0Æ<Z)!âyÏÏ·!dY–µ«…Pa°„(1Šˆ(®Vbš ‡ Gaˆ‚TÕB¥$¥d¸„1ÖZ‰()ûûj•²øç1ŒB˜b—Q¬”çQ"Š‘0Lëêëêê´VCÅÁr-Ô`¤LÊå!Lç4QIµZ ÃZs³§4I$`D G"ѵZ­R‰—/_cw¤eYÖ®BQTSR ­0h£U’ÄÚw1vµ1w8¡8ŽCŒ1¥¥T)­”­Ãj-ª…RHÎXS}C:•¢û®‡!„4žãâ´a4 – ½Ü ¸[ßX¯•¬ÖªårQHIRF Fë0¬õönhoŸˆ0ÑZÇ"ŠÂ¸§§¯\. jµŠÝ‘–eY»Zi)ŒÖ!Æ(òM0%@„HÂ!`¤Æ`¤Œ¥®ã4Öׄnj“Ïf !žç@¹T6RøžÛXWßÒØ400ØÛÛVʽÝë FÔ¡®ãôŠXÄœ²HJ%…ÖJk3488Ðßç§òR¡r%ìíèìì Ã0Ž#¥•Ý‘–eY»Za„ ÅŒ`Ц…  „J ¥¤PR á8\JEBØq]„ Bò¹,Îå8cím££ ¡®Þ£â„aœöƒl&‹ö— Ô××'¢dh` •KSB8cµZU(%¥TJ!0`L±8´~}רfÄœ@ʤR- H%1ÆœÛK…,˲v¹r¥”Pб1FaÀX&5™$JJ­µRR)I0E†!”1Æ9BpÊÏ÷]·¡¾.Š"Œq*d"ÆÌõÒ¾ï9.!”ÒB! 
qœiâ0l<îGQU­µQJK ÊÈõë×rÇÍd”’ÆÄÚÄŒ±ï»vGZ–eíj!ÄeŒ!0RÄRÄXk£5`Œ F L'aµ&”˜c-•Q’”ò½Tø®“„‘ïz ÓXFZ*ʉÃΣŒaŠ4 Q%BÄJ(%a 3#5ÂZË$N™SìÏd³„1c0!:•bJic”ãØýhY–µË…£L+Gq…Z&œ J€RëDÈ(Š$c—2×ó8çJ)‘$JHŠ0gŒ‚"˜¤<!DZÒa`F(§# ªÕj5 uµ,… b„9ÌI”-A0JIA¨I„@bÂ8G­ÍBIF¿£sBɲkO=ýÆnóÙ?þáœñìße×üÛ®˜eYÖ³­;&!ªÕZ©X,•JµZ-I „`B´6JjF8!ŒRŽÖ”ÔQ%QŒ 0Ê8ãŒp‚¨ë8A¥¢02Z 0Fˆ0æ~:›Íg29ß8s0`¬#„3†6ö½´–qœÄQ©8T­”ŒžËÒ)7ð¹ç2FG~VÝwß9vtüŧªvç[–eý;÷„JÅbÖ¢°Z¹ a0Ú( Zc„™CµÖZk!¤D)‰ÐB¦]m(¥œsÇq<Ï‹£x®‡1æŒsÎcŒRmwO©X$žë%I’$1%ŒQ†„8¥JƒP #’¨Z.!„•ÖQQFÐpçld4ôâý õÆÍÃé蓾ó³C8ho±S/,Ëz×…P¥\N’H&Âa”Q†@Zi¥Bˆ1¦”B*eÂa£t'Hkdˆ…L£Ôw]?•6Zkm(å¾`w\?H9®K0‘J+­M ´TJ*'§ŒSJ0a„„5`¡%4‰’¾žþÁ’R*ÍdÁJôÞq¦<ÿ¾ùÿ)ó¹±ß¶÷möµ,kW¶Íá¸DH¡€1æp!ÆcÀ!„R6Ûeþðë3Çs{hZ–õnï ™á~ÆjB„`Æ¡!ä8N:áÜ!„ÏMÀ˜2ÆB„F% .ËI"ÇM¥2©T¦¡©)•Î0î L`ø¾§Ê¸ãø¾ŸN§ó¹|.»±KäP†!ƒ "Z„‰Ã½úº¦½öÜ÷¤O;÷ÓŸûøéŸ<öèF°¡ Ç^ÿ×»¿{Ðpê’®¸û‘Gþzãc¶˜sÖùç«qÞÿ_߸â›_øÐøáÂèÅ7ÿîå@­¿÷/Ÿ]<ùÌŸÞÿÔ³OÿåºO—®½â®µÛõ0 „Ñð_]{í²¿|ý÷Þùë ß[7¼Ä—o¹õ•И–eÙžB*1Æ`„1cŒ€bP€cŒ–C„ç,“I+! B  Ä3†ÀÕj•–Îg(ãF›l.çiÆ(¥”RFiŒÂÄu]i*Vq-®U£0*’ CÏA6W×7Xèê~²ZÆŽ7mÚô‰míJä4æé\.Ňñt.Ÿ’ͯPqÇe¿üÎÑàýû9+N¼j).PÓÛÔª»ozQÔá93F9}ú¢“ï=û޽ètžü™ ocuîÄï~ëcûà” /xìñKž•…Å‹ÔôÑÄœ–e½»{B±ˆ0ÁÜáŒc2ŠŒÁ­ŒÑL,b@†qÊ!\FryŸ; Ïq³©lÊŒ6 u Í-m™\>“ÍÕ7bŽ‹87€’DÄQ¬„m@I%„PJ" Œq?püSF(ãÜ¥ÌUGB6¶´Ž™0¡¹}´›Iª¥ ƒ}z:»»vH-ü™;´q¸ ¤©cVëðÿÖª tiÉ3k‡ÿ=îàñ›Fÿ¼ñ3'€Î§ é·±ÄÌÌ£ö†{E8=ißÍK¬Ù›áY–e{BŽë:Žã;w\„°ÒF)   F€@-µŠ)-¦œ1ÌxÊ ¢Jk“òÓÙlÖõ|L§3*µ‰Â()•«åŠd”f„0J1BFiN©2Ѳ‡…b¡X,Š¥J-Ò€\?@Žo˜—ªklhU×Ô”oÂùæf¥”ã¹±’Âì˜'«ÖMhÚ|*†ø¹`øoZUZ_Üø“y½÷€øÕ¾Wû4nG¨5·y`7ål±D˲¬w{¥suœQ‡PÂ8€VÚDIB1ÂŒB€a¬”J¢ˆ:žë¹œq—±¤œ;©LÆ ÒˆPÀDH„JUka¡P(ËÕjÅ(“òƒÀ÷F]ÇI©(ŽKau 8Ø7Ð_­”kQH)K»æN,aµÑãÇs×JqשklDÉ$A¹A°cz…tÓ¹@èu?ÛòŸ„Ò×ÿrëÁ±PÁÈ‚–eÙzs™ºk I™HQ":|½!„q†ŒRI’xÌq8 \ él6ËQו€ ƒAH•(&I5Š)kqb¤4”Vžë0Î1£&‰ ƒ]Ýk Å¡áå¥Sæy†Ðr³XaBŠ¥R©aÊ™ãrÇÁ¥ïôó'$Óšè|àU½þðô›¾ª²iƨE›Ôtµ¯?¶šeYÖö†P*“QRÈ8ÖR@JMN@„(¡ŒáXERHà9N*ƒ+ž×ÔÜ’©«ÇŽc1„#u”$µ0Œã$N×u€Ã¹ëº© Èås~à'B »ººjahŒ!g2*„Á$ÖFéêZ‡ø€ÁT! 
˜pÎ]×%k%O=é„íÚlÇÛuÚ§§Ô ºôʹ«Âçy&é]¶¤góuõõu)ŽHPç”Ìšç×DGîí€î›sß{œY–emw™á‘'c ZKŒ©Ô1FiÐ0!Ôhc”"~PŸ¯s©Ã™ÓÚÞ–Êfe±RµR9I©\Y»vݺuë*•Zžã"dßonjÕÔ˜ÍçÒ©tJ5v­Y¹zõàÀÀÀà e,—˧RéPÈþþÁîÞþªPsæNr|ŸsÎÃLÐHdzèw=?ö¼Ó4¾iD¿ÉÇwú´~°@Cÿ¿yå”oŸ{x;ê~òWëžnhÿÔï~ÿù)Ž3ºcüPï$‰ËµJwφJµd²YÎàrµb0B´Ò  `¤„ìì8˲¬].„V­ëëëí‹ÊU²ºT& Üç=ùº¦|}SCC}}è†æ–0Š0—sŒŠ“P„0h£‰¢!F¨VºÖJ¥booϪU«šÇ) ººº™Äqìûw¥e¥Rîì_ß»Ai£9õj(u¡8„F„F£µ1`AØv…,˲v½ê/'ý¥¨V¬m† QгºLfùêµÙ\}"L*åŠB®Ã12qT3Jé”ÖJˆ@ˆ ’$¢0PèÙÐÓÛÓÇ8ÏçóA*…Šâ0~ºw-Žú‡‡ …€TR# .W ‹Õr)ŒŠ•2¢¬Ò€Á2Úhc'&X–eír!´²sC­RQQBTÑÃXk³à•—ËÕÊÄñcÛZZ‡h×堵Èë8Ij)c@k­5€1µZØÛÛ»vmW¥RÝÖÞØÔäû>!„RJ )W*Q•‡Jq­ÁžãkM!DSK’bµ¢0SÃ3À˜áJ þÙÙq–eY»^•+BÄ:ªF2ªRÐ A}C4H3²ú{ß›4aâ>Óöfœ)•2Œ`‚¨‘ˆ–2R‚‚i"Äà@ah@ƒÊÕeF5ø)qJAØ3Æ)Ã0D6ci?¨F¡Ã<ßI¥B©ÊI, JŒAZ)m„›Óæm ÇYí_×ÕÝ;0T…̃l}Skû˜æ ÛÚû©Áçn¿c~î¨3Ãwöž‘½Oþþ®å-ÇžñÞVfS˲ޅ!„4ÑÅQ’„1dž0¬‘ðR.樨݆8H§ögûsî*”€–Zj0`´ÑJK)E’€àÆh¥¤H’8Nbm Xb”j£c AmºÔÔaÜH¡„HÂhýúõ^&=¦µ5¨«˵P" €AføÚ#¤‘1”Ùîë„tÔ»|ÁâµI¶m̘©³Ë°qµØ¿¡sÙ³ë&î·Ï¸ì¦‡§F+ïûýKãO=qÏþçtà!AŽØƒÔ²¬wg2Š•Hke3A}}Îu)0&IDœˆ´ Ê´ÐRI£!8Ic PZÇIL)÷ŸqV­VËåžjµ¶…"¾ïSB‘Áœól:íRZB¦R.úúúÇæòuu N&½¡0űT Œ6€Þ8M[+­”©@oÏì8­_ôì’Zó^3'5º¯ ¡Ü òM­-+_œ÷âboÆ´f€,®+3þ_°g°×¼ÛÔf{„Z–õ® !¬ETRqÈ©ñøyœ -eb´&Ž#)$` ImŒ2Øl-c—>ù؂Ǟhi9z‚§úž¿÷þ—¢Ñï=er=)¯~þ±9÷=hN=nznëƒ0%P[>çÅ=f|ðŒ÷¥ÍÐÒGï~âooi=z‚M˲Þå!Äö]&…äšêrS÷˜¼Ûĉ„þ =F)Jˆ”I’ÄÚhL"D&ÒHM 挘á‹x¥Xƒá¸‹\Çõ}!”òS²I*¡D,J¥RFŽãd³Y×w¤VÜõ|×­&¢£ôæp*¬^Y4wmÕoÎ;Ûuñ¢Üpèñ‡4³×å‹“¢ÐÿÆXÑÚ†ŒeYÖ[„—Î!­ÇO¥2¢V_èÁFaQ¬†Š%FqA€ÑBĘr×u¤ïi%E,ã8I„gcŒwv†Û^† c J5 — IDAT!ÐÆB®ç:œ{žÇa‡‰JÆu”.vu.ì^¿Áq¼9®ï© › ²Ù|¶3&ŒŽE%ÉÈ6;i.ŠU i¯¡Ž­ÙÐÕ×¾{# ûV/]g]TRJF] Wˆ–i­þ–ôÖã}4ÓÚ@ –pzüÆšê¸XÖ)Ÿ‚ÄŒ€ŒÅ¦ÌS¥õCö–eYoB±‚©F&JÂ8Vn*?гnÍÊekV­* •ZšÇG0 #£•PÊu­uœ$±P!$„BaŒÀH)„ô\×Ña #D1õ=—Ê‹<Š]žxµ°%±P¦Z Ãjm¨0Š„Ã=@˜Š(ÃÌqÝÐPÏ9# ÿ1cŒ%£A@ˆÃ(ŒãXkÍscZk%„kƒiªqŒ–QGIʼnÔcš$bhp¨4T‘ 3DÔðEHZc%„ЕÚPwφ«ÆO˜8v¸n-N”Ÿ»dÑšì~ãòã÷™µåE¨íûÚZJ t‹S4¼eŸ½êZøä½këö=é¤ý¶1DÇš:Ž;ÖyúÙ—¼ã)@S£&|ìÁ{ç0€7îÐ#¦>:çï·ÿö Äsc¦ÍÜ>²,ë_ÝâBÖPÚO¹'[‹K“ú\¾±)›Í¤´J´RZ+ƒd´A€(¡sŒáG9ÔÂ0Šc„ÆcR«†ç®ëz®ó„¢”ªE‘*IäP¡Ø¹ºsm׺r±G HÀSL(# 5Bˆ&`™Èâú h¤³ãËOê˜Y¿nÕêÕóW-°éYá<•khjßgʨ¬M 
˲¬‡Ó\Ö’¤Zs)át’TÂxÙ+óW¼ürЖ¦&ßqÂH­Œ„4¡„„Ƹ†QÅq,¥2F‚‰ç¹žëqÆ(¥€Py¨T«D•J­¿o`]gwï€Ö@QBE*ä”QƒeŒ‚¤FBF$ù†üöm4vòí»çÛw­„Æ(¶ÑcY–õoBy?¢¸§(m¤ÁS Œp0 ¥FxxJ5J m`øV Ã-²mÊ„ÿºî„Ù›}Z–eíz!@9Ÿy± ˜ùÿ} sNâ|S.å:aF(ƒhF!Ð0¾1¥ QÒh£7]ÃŒ1Æ¥µR !DU†“bB(3„Ê(¥Ô Ê1„`„0Fxxn·1Èñ\-¥ÔÊå^ûÔ)³NøÐa9‰Ú²,Ëúµ­)Ú–eY–µSa[˲,ˆeY–eCȲ,˲lY–eY6„,˲,ˆeY–eCȲ,˲lY–eY6„,˲,ˆeY–eCȲ,˲lY–eY6„,˲,ˆeY–eCȲ,˲lY–eY6„,˲,B–eY–eCȲ,˲!dY–eY6„,˲,B–eY–eCȲ,˲!dY–eYïÝ¥¶Æ”ž¿îÒŸüuÁ+kË ¾éî¯íéìÈ÷×ýO\ýížZúêš`ê7ÿrãñ6Æÿ èÚŠ‡~uÝ-½°º ü¶C¿|ÝEþEë"Š—ßµè–®¡WCoôçO?”Ùdí ªwöO¾wëœù V—&~é®ß1zG6èÿ¬æì#¾êo7}÷Î<þÈÃf¼çèSÎüüÅWþ¿—ŠzG„Gmñ/Î>âÀ#ÏýõË5óÞe:¾ð“[nûÁûüÔol8ìË×Ütûÿ³›ýüIVÿ¿ó?~éýÎißûíW}¢¹gù†Pïô…Šâú‹XzOyÓñªÃ;f/üúÒH²ì¥§Ízñ´öz»c¶·tïæ¢¼64ùåßð˯NûÏnîÞ:8“λ¿qîå³²ûöé‹¿°Ï¸®J–Í{ìžßßýÈËë'œzê>ÙwœŽÉÊûÿ¼ ¢áÅ;\ý‰©Sû9µFþé->}ÍÏç‹Ö³>uÌô Ü|ì‹§ ôòß?­–kCÄà¡k!¢n«cÔî[:ÛîÈžì¼ý’Ëg¸3/»ígžþþ§Nž0i÷}9þìKyË%SùZ gÒ‰gÌhδòñã'lQ}Ýs÷¹8ÿï•ÿÔâî¤õÿO/Ë.{è… Ý’¡€2~ñ’c[wúXœîˆ4oZPR­öß-Ø)é§J]‡]ýì}ñ®qÿSK·óª·ƒßv+m  !ëfß½€îwêáocÇù©û·ì¨rwûÄOï›}ÏÕ§Or·øn°ê®›^Òÿv5C#ýn³sÖÿßµ,ÿª¤¯®_[æþ3O½¹º¤‚¬—Þôm¾X «Ìë ·w¬l{_yiÅßßÑ8õ¿Ó‘6ÒÒí°dçTo‡¿í›¶ÿAÍÝÎ ¡âº!PµJòÇ"Ÿô‰+¿}|ëNëCWçßúÇÎwR<´³ÊG(ù'¬ÿ?÷mÿc™¤"þùKUñ²Œªsøæ>Ã@ŒRAÓ>Oí€G\¸ê…ê¿ ²;éHaév”T½ÕNùYs÷NZjc¶æbå¯>òá_t4Ÿôã›.:´~kk/»á«Ý2§s¸ãî6MØëˆÏ]~Á{Þl.E¼ä†‹¿{ÿ¢W;ÔM:ìãßþÚÄ»¾ñÎ_[2»}íÞ[>Ü‚«‹nüÖwn™ýji‹ßþ]]ùØm7ÞþðÓ Wâfò-c÷>áÂo|x€Ú³uÞ_k-gÞ|ÃQ¸ã®‡nÙ€Àu{uî×/oÅ@~óÔßwêÙg;5ƒaû—h:_YúUoy¹ÑWÎÈÓµ ÆÞÜ5ý³Yü/ó»Ù…€²¹Ü‡¦OüöA£&½ÖiS žYø•Å…ù}µ> (—ÏŸtÀîÿב¯G Ÿ™3ÿ3s7,·xÿQ{­>{ìX zíª5—Ïíº§³¼AÓÒpÚþ/™–«C ^xâ…?ß·$€üï¾4ótÀˆG÷ß/—ÿÌ'üEÛ›}¤·}¿EUßü«ÂÈJ×ö¥`Ù>÷ ÄÙoÝøòÂõwW5xéì1ûLúÞŒæI ÔÜGçžðÌPïpÓEÝiÆßzÒ„½©XõÊiw?ÝG€ÚÚZÏnSw/ÜJõŒ\ôòòÿ}¦ûáõaÀKgÞ¿ç„+mÛûµÁ½êÕ?Ùu_wXp]wl]æ¸C§}y~[;åu½¥ÞûÎ;ñ²ç€Ý?{ñ¬•÷?<÷å®’§iê!'|ê gÑî"ЃOýðk¯oÉ]ým7wfð/ŸþÀ¥ ¶˜'ÖÞùõÏÿpöºÀk™vÜEW]0+w^s·³C’e×}øôß®Hí}òyŸýı´ûo¾"ºÿÏóÍ”ÿž«ï»òÌæ KVÞvé—¿÷²¯ ise“W~|òÙ/~òÎßž:\m=øÀ§?ðÍ…›v€ªövoXrëù_þS/švñÍ—rêZG¥¨Þ¿^qÎE÷v§ö;ó«Ÿ=v¿6^íé|eÎ×þúùé×?xåþk!„ÆŽËg›Z›ëÝ¿è‰g;€ºã~ç¥) Ëó®=ç¿n^‘uÞ7Ïûà”`hÞm—ÿïíËÑž_øÝ/?9ixXÖ„Ko:ÿS×¾”ägœóÕs>°W“+£JaÙŸ¾þõ?mØÆ^Ùúú¿õBõàìoœöµ‡ÍÌÏ_þÅ£&yÕ KŸ¼ùû×wqÇmgµÅ[-Ë›©={áQç=ZËðÁ)«z®T×ä ö 7Ü{étÖuËé'ýdåæ¾xÙ1çÞW!TßÃ_:á’9 dZÇŒjhmk®óäúÿöâ Ð|úMw|yOFþ²|ëëì½Ihhý†R´á¾ 
ÎûM'L:ÿ†ï¿'HjTkƒÀ„Ënùﳯ™'w;õâ >|Ðh¯¶öù»®½ò¶aýû/¿éò£›éö.ÑT‡Š/•ˆ‹ýw®…Cvoh.½ ^RÀí-ïË ê¦刮coîº×Äæ#›ƒvÿðRï«xÛ”9§OÚcc'þ|Ë_?E'þ|VËŒ׫öëŽy×¢CŽ:böþ>S*Õ:{VuûšuÿÙÙÓtSg\†RPóç>;óÑA1jìÏ>0îýyR. ÜðØ¢¯UÍ{ìó̉mc0€(¬Ü÷úWo¡ËüäOŸ¾)ÜzmãaUßIézWü«%‹ZF·œØžããÚÐàÿ{±w¹ìÔç?:~ Pé\0þ–®~p>÷É÷\¿åV˜è¦[ûb°ÿêSÉÖªgijŸ>bn¹~Ò”oÝ×IžxvÁYÏ—Qëîó>1qo ¦gÉK{þ©[Oœrë‘m{sÙ¹¡÷ÿ\úêA‡¿4÷ºSÞdÃÃå7ò£?]Ð0ýýGì3¾9ëDÝóúóS] ÜŽ¯ÜrÍÇÆo<ÞÐnÜoÝÜýc$]¾à¬«ãs~ó“Mtìäæîz«Ñ>ùÌËÎ|üœ›WTÞyåçï¼2;å=Ç|Ú©GwŒ~CẃOÙŸ¾ðlí™»”gmN¡xù]·>úlïÚÙŸ~ïG7+ÉêGœ|Ö¬¦­ô¬HÐÔÞ^Ê2@^Sû˜1©MŽî»¾qé½Ý0æìŸýä¼=†[‰Q£'M]øÛ9‹Þð&õG~ûÖÍ× ‰ÎÛ>}Ê^œ}×’ ::|ˆ^ùÕ7o^aš>|Õ÷Ξî@ÛÑ_úîÚ¹þÅâ_þèÑc®ýP#ˆ—ýæ¢k_J`Âç~vÕ§'múŽÔæNÌlØVvkëÿÖ •ëþòˇ‹0å‹ϚՂ`TÛø¯ͽ¤²­·Ýæpn=ãúÙ?ŸAë~îç—oO?<Èù i8ê{ÿoS%MmñO?vÖÍë6ÀVwÊ›3N¾Þ˜|ÖåW|lã„™³>uÚÍŸùØ5‹Ÿ¿úŠ{ÿå)[F³=Íݽ¡§tþûOùé/>º)vns·“Ï  ô>_øím—ž¼Wfã—>vë÷Î;ñ=§|õÆÕëÞ+À)r€Ú3w/Ø| @ôê}÷ÀÒ»ëÞt@Ò5ûÑþÉ'ÌÚîë 㥷Ý0Ošþ©S§lù=•6ü¾iÍÛ:³ÉZyï8¨t÷F ¶ø÷mh<â˜Ýý×^tðuɼûU TçßrGÞÿì“&ì)#X¨XÑF¿ÖC%£Žû¿«Nm{»mJf÷YŒÏPÒzÜ׿vLÛ;ÛäOzßáMPX[Ûó²‘|®smÑm·¯ðf|ô-¾½¡Ô´œ6 yî–ÖŠ·½DÝ?$ç×mzã$ ÀÆzoñirêÚØáÀ+/u-ÚX.òÙç×v´NiÛoón&ÞÆ9úñCEPñ¢2ÀUDíÓ÷¿{‡Lq&žúßÇæÔü?üuܑ͉^½õ‚¯?ÝqŵŸ›žÆÛÓòìøæn‡…à`·.¹ñáG~wåù'4fãH¹êzìÚÿúø·ëßb<å:N:Ðy] …Kîž=@–ßóèÆr‹u{¤wò ³¶ûTd²öïOõ@ûA{æ_ÿ»|·s~øå7±ÙbݘǤTĆy‹JÐ0©a‹£ŠfZ² {;‡@²nîü2Œ¹Áîí1’…²|{–]wÙ Ïô$f㺧óþ89ˆ‚‰3÷mx§o„Ü”Jh³/ɶïÀuNÖÍW€Öéí¯?µÃFMÛ=€UO..ë·¹D-ÖTM:Ã½×æw%ÒñFð ï61Ÿ€Òà ÛžXµíQrU{¸SÀøvÿõ—eãöÑÙ ô÷>í„Ùtï ªï¸ttÚ”,€þ¾ùÑÆ²÷˜é÷ü¬SlÜZÞób±eÏ1ûn#+TôÄ:-Mßò+t–€(Wû5q&ûPºäžå”6舰F¾£&;ù“˜Ê`õÓË*f‡5wª÷ÑËÏ¿œwí…‡Õáílyvxs·Ã†ã¶xe~Ê‘g]räY_Z:û¶ë¯þÍS½½÷_q͇:¾uЦoc(³ÿÉ3¼'ÿ¶yD®öò]O ]ôÉEß¹qÍÊ{é<㜠L®⡞ɹýÓadß«½¹öÜ;t$ €W¾{ÌßýÇÅÒ$½¯ön›Û1SG²P6ú˜s¿éWæýòóÇÜÜñ±V7@PZñ\Óï{ßâ H£+€‘7^†€0áº"wBOèíWu;J—ÞZéÈÆM++³ñDr; ÁYÛ­ZóPµùã)Xµ¤kI]ûíõÛn[6Voß÷Íúã”7vm0amPÛžû¾@—ù±U÷ ¨¥+Ö\°b͹–ëO›öÿÛ»ò¸³ÿÿ¾ûíV·}§h%Êò$‰‘}Ë.dŸ ¾˜±/cÁ؃±Že[HÈ6b ¢„PT–öMûr«Û]Ÿç÷GIR·[¢Ìïy¿æÑsŸóœóYÏù|>çœéÆìzQC»tc›T,#ëÅÜeüýóÂtF3>âsÖæ;ש‚½hs÷ùœP)«„í|æ÷öûáFqqz–¤lbRê…œ‡wѺXôàbD¡3Óÿ·ÇNm Ãa-vüú2å굄‰ÃŸ]K±Þ¹.eL¾6CZ$%¡ýik®€ HÁà˜[˜TÝ‹Ça”B"¯/}V㣼&Ýgnë>5÷uH`ÀÙ¾!oÓnnœ%´>·´½æ×ë†Ô{½…™¹š\@ …¬Üb½3‡r‰¸šµ­0Œ-›Î2Pe¿ý35°‰U©¶Ê‹ü³¹¶–ƒu™X\¾&Òj͸\ ,#. 
ÑŽ;™"°Gh5¥V)?SÈ+©Ê£S*e¥ Êf4&ªÖ‚tÕz^…B L÷Cc7oæÉÍò—å쑎qRøEˆsëÄ‹©Å`pxÍt5ª·¸,‡‹v™Y¾á‰Ûe%æ¿yŠÓjºS×ú8Ñ€*ó |-³^Ì]QŒÆÌ¿vxóãˆU¡i'ÖžtlJyqAØ;µÅJõciä–Qÿ»”U•«æ4ÕÀä >ôÓZNC¿%Î{(è5Ж–©Çp'i×.…Þ¾’l;ؽN¥~C;CÈ|•ù©ûËR/ÈÏ©¶)Ž®…™›’_?Y^u>Zá×úö]½î<ë;»5 Ⱦyñ•ä+^ÕnìŸ>¹2´1€ÂŒÂJ¼S¤`[ë×zñÅ×Ôlm¬e íö&ÚÎÆÚÎÆÚ-µ ·µ™°ôŸ­tU&)*ÊÀÒtÖd’’Â`»~-çž,~k-ÈÉ+Eðä%âà;~ŽSp>ªõ@ºbq6¶ U…>CCV .€à'éo²ÓŽŠtf:ðkðç,®2²¥25,¤±‰Éì>®QS[vdEoeÔ1æ$åàZXW·l®¥¹³œòó-Ó¾‹g82€ØëÏ'+ÔÜÕ—HqÂãä*æu”$=&€¥‹U¥M}š­‡yè%w~Ý|_Øg€5Óø›í˜@ƹMûb¬ëèƒÀ1si§ ëv`ì'dޱ“­@fð½ÔêØÂ³t±aˆ¿‘W/g—¨óQYÂ…7ÓßKƒo3hLk%ù’z;@…Áâ”ÖÇ~¹Y:c¯Ïu—¹¡ -<©äC+þìe1[GaÖ ™^ gkj”çŠ%Å%ùL^s¾:­)£cD2€ifÚŽPYÚbmûÀÒècÅŸRüá®}2)¹@Àؤƒ0ÌÒ}xb%ÕðTý4Ò½ŒÉ¦™iÛ"lWÂÜÀÛ„u÷S2,¬h1j¤ž›1@jlV\uæVYt0èmRÆŒ›Ì1 Ì®•‘$¿QLÇîöšõcî8.ÇÊké$K@ñû¯™dš»úsBPä<¸ü0«2³¤qç÷IÁ!ÆõoZyê#h9¤»€¢bÓ~ý›•®™î^.l@–]â ž*+g#ó3ŠÊI¢á8ÎÛ@úñUûBs?É_k:ì© éÈ&¿Øª+‰˜zn#;ñeØžÃEµåKUýW㣤(òÒÁ«IòŠúYÀÌÉ”[M³µKËÔ öÖ“Ü/%ojŒ½>!h5f¤ ñ Ê|?Dª0Ü÷\ÀsŸØ§Ž5ï¤,V }]nù©39ù2Нa¤ÆbFY˜±þ…àŽíbjÁ“'°æ(¾–Zž òŠòÊ3%K––SÝÁÕÊÇ&\.¤*L óv>)Xý:™7UO[ IDATg›Ç7c(:—$SÖƒU?tdqÖÖH)Àínbþ¡—˜Xúè(>-ëæbd̨™zn„©!€Ü˜ ÅÕ„M†¿9VQ9Hyª€†›³šfk#D¹!ǯçÂÞ>ÕÃ:š;~‹IK‡’à-Ûï•ù“Ïnî>i­Æor ÷=r@×öv¦ÚÌ’ô—÷/ý¹?àe>è—_›},B-÷4¸à—c5¨¯%·|ˆ®^®Ü‡Ár§!î†ê¬ƒ8FN­„H½Ú±tcAoKF¡Âfèxë ‡OÞwüûÿvèãÑÖÚH“ …8/5!«é·+&Ú«®eh³~üh~@zØVïQÁÃúw°6à*Šr3SÓuÆ­ží¬0õ»-^ØeÜÚ{égfŽŒ<¤‹>«$;1òá× (–(«wãU÷¿æ%1û®áÌÓÃÑŒ/I óß¶/ÂÓ‡4ãTÛ¬I- ª–óN!A%Á+ÆøÜt·Ñ&Åù 1ñ‰" ‰DAÕÿ¹…ê¼n+’Rñ¡–rm'nœ÷ì»ßBVÏݬX<ÖÝ’_œôÈçFÿlõûyEŸ:îý¦äÒ$)ÃDç]àˆ"Ó ä-^•ÍÉÄwâr8BŽ&‹ÌÎÎÞûï«Ë2†“[ûÖ®ÐÇžëû\x1dh²IK–üerVˆ„ÁHRYnù¹Úz®xY"Zäÿ"§¥&SJµjk=ÄØæL¯ÜÎ7³|NGÊû6ï§ÏåfÿñÏ‹ýE0oå|À‘_¦•|½ïlX×ß(ÿ¾p×=Ú¸5*,)y™YôªU"§T0ºjIûªÖŽtÇdƱxBòr³vßx~N [¢Ýv›’N,­‰íµ×þS¾É÷V•søUSϪš69CÂKn߸Û:Ör\sm)*–Äå–¸¶ÝÔ„òŸü"x=š7×ÈÅ·ŸDý”½öß0«mVÕÚº(ùUl*ÛP“KŠCÏn]w«M½Ö.褣ânÝÌC›˜ñcÀÅ·Döh¿„Ðb|vs÷)`­ZµJ•ÒkéÒÂD›¼˜‡7/ž>uòÔ)_¿K÷ã–ÝÆÎ[¿jR{Ý*;ÅÖÕK¿P2bÉä÷?`ð 4_ž½)œ¼Ø»E…ô#™}û×y‹×ïò—ȹáâ‰::ÛjñLZ7+ ø"-)úiÔ[¥¡s×oœLxl½V}†¸™(ós2ãž?}ö0ôÁ³7™R¡}·‘#Üšðrnmš½pOH®Gþø IØ¡³ëÅžys6‹)!¬÷…®ß8YVß üÆL™›™÷ôñÃÐàû^f(u<¼¼;[j•–ÿ°„=ú:ñóÓRc#?|òøMfËÓ¼ø·oÄÊrBN»ÇîæÙ®ê>§ªþó5|”Áb+EùiÏnžùëÈÑcÇ}/†f™vŸ°tÃÜîeަÊf+o\SfU¤Càå›÷"e­¿q.ï(C£™kkFÂó褌´„Ä\è5ï0pê-tìN±08Î}<ÛéŠ_ß>spß#'Î<Ì2p6cͺÿ¹é³Ô R•¦±0÷R 
ÙÞÖЮÔ’’;oòĦÆ=ôÙU£)ìlÈÓaJnD$l ‰ÿýIòÑW"©¡é¼>Äá:¹•N~þ좈·ùQNǶ­|»k=ÌH•üœü¦IÓz,°4:Èï'äÇç½-Qhé ²ÕmÊaš41ŸhÉ-ÊÈØ}ïõšàø]á9©ÚFS=Úžô0zŸzf°[4Óeæä‡åJâsŠ2ÀkiÕä§^Í ß$ß“Ïc÷gio©]u«jæÖDÕO%% ÍH…2ímÆŽ»¯×†Äýž•$0šÞ³ýqw}ƒ*ºÊÐ2®?Ì”µn½¥¥÷£à[UÔc;Ø5¤CeJ^$ç܌Ϻ– J!¹míšÍ²ÕÔa &‡’gççž ‹Ý·õQòÍ"¡nNgzš–6Tu³UtŽ*y}ñøíL”dEß9µÿ¡£ÇO§ë¹˜½á—oKïb«Þ2Y*Í@‘pzéŒåG¢K( /40DÔ¢g' •´eÕÑy @}ùÜ?qB7–ºZŸ×Ü}Êü´†³ãhРAã ‚,Jqß ݃³Ý¿ÓVkANI2‡ïyaåí±ÝœÕ膓syrßU/à¸òê‘!ÆLš¿uÉ Ñ AƒÆ—·ÞÅêfA¨¬øÄ ͦÓLXx8Š=ݧ 4¾¨k³Iññ<ûMìYz4´ ¢ 4þ#î)!ìáð¨Ò’BòõÓç«D&«5Ø4a¾NÐŒ£AƒÆ×¶"¥×ÿ\S¬ÏËJÙð´d°W»üÆ> z)TèÂ4h4ÏCyÞÉN.˱̖h7S¿RÀ†J‹ kw!#à för^ßRÐ(},æèÜ9ûf”Ì adÛiòæu#-94§i'Dƒ 4èœ 4hР 4hР 4hРA;!4hРA;!4hРAƒvB4hРAƒvB4hРAƒí„hРAƒí„h€=Þ5gÂÐî.....ý6GJi’|A(³þýõûqÃz»¹¸¸¸¸LðÏ$¿êáâØk;æàáÞ©K/¯¥W3”4YhÐNˆ†j0„.?ì8vrS/:K¹r7×SF‰é£*ê,£î‹vŸ8w|Q‹ÿÀ`d §f_y…7rßG¶N0͈I/QÓyT–«êÉBK`&Eq÷Îì\9sÜà^]ÞÁ­kŸÁ£'ÏZºnç_¯ i7_WИ~a3wå|D‰§þ'LptäÑ¡Qî Bvî —›OòñlcÍ¥ÆÌ™“ÉeÖ·\ÑX[¶ˆ_Ÿß¸lóÕϦÇ豋綱37ÐâÈ‹²Sc#ß ¼|õ|ÈèØÍÁKS‹^ }±yQÆÅ©}fÕþMžíÐqL…æ]ƶ¦õÿ ¿‘£äõõ°@ÛLÈÀºÎ^6PÝ›BÕ—«†“ÀÏĸÏ*Tñ‹?þ7aýÕÒ~âžK'7ÏöîÓ¡es c#c‹æŽ®½FÎ\sàÒÕÕ.J¡¤h=¢WB_p9áèÓ|³‰uy—o?á÷Ëê_Y“ÏcÒÄÿzkñÛ”b>ç³ÊUCIàgbÜ甪èéÎÅ¢•àwúé·ï]õ«œ°ô]ÍYÙ$­GôJèË¡8üøÙ¤jž1Œ/ÝEÞ‹ ›§ òÚð¸ˆúÿMü¯”¬Hþuö\- üLŒûœò |{uû¹Lf£¾ïSýÝáóncGu0dÑzTÿ+!Iô WžM( çµ7õÓc€=ܹdËõð¸,) ?üpÀ2gnæå™CW=–@‹éK;Ç] |•,R€gìØeˆÏ“º5å×d˜IqìÃûO\{•!xFû›9µ¿­€Iä®{–UZ%¤eæÐ~ØÏ&ÛóY̱kÎ…½L) !´ë;û×UC›pT5eÆÕ•3Ö&IØ-86‚qÕ÷´ÿÕûÙßlY­½ùñç¹¥óž‘Û˜_6L´å’¡ÛÜr=">G¶që>ƒÅw.ß~#:¯› Ø/¾tl”Ù;zòPðúÖU¿ w½Î‘3õ[ö›ºüÇ-4™™{Ëâ_ÿOQïÞb‘y·_|·læžù½}Ì¥sAbs•lC§3W.d­ñž|”4õÞ‰?Ž]¹‘˜¯¸BsKYì›l†ýä \´5Lµãþ=yäL`Èóø<Àâ õŒÌ¬œ†ü¸b”õ»h¶,ó¡ßÁ¿.Þy›# 0utíå5yò@G!* ¸so;¿yU?Úí¿¼ _5—ÕéäH“×GV¯;v« âWÖÿ’ç;g.¨‘›NÖm]ÔE_õ<¬– ª€Õ»Œ~ûåÀ‡±¹ðbU_×U è³÷ïõ€ô͉u¿ýñ&.%O €©Ý¤MwïYsF9ë0ªåªšÁ4ˆRÅ/T0®F© diAGvº™QWÛÐ̲E×i+g¹ó¢ªk–YÍ+õk3éV¤Ý:&=ûX«J÷𬳮´ÎøOèÑ›©„<鯑A>×rÉ÷?ù¹;A½×…—PEQ¤øÍ‘QADßÉK6ìÜÿçÑ#{7ÌêJÑù'ã¤*¿¡…íåB=gºó2%=ùÅÕM£;„ëÄ?ßHʾ±©AD÷å É_.[Ó‹~ V¦fSUºø‚ ºO_6½A¸÷îß“ z®y&¦¨â°Õ= ‚ úl(Wy3/Mqëº(¨€Te$ÅÜYߟ ¿¨ÄÄÄĤôBEQÅ¡?~C„Kßá^Þ>3ç/]¹òÇÃ:A½W?zßmeΕo ‚ ÆœNS”ýI‘y}V'‚ ˆîƒ†yû|¿`éÊ‹¦xv ‚ <·¾xß—’×G}:D§‰;þy],.H ;¹¨wYŪI)2n¬D„ÇÔ—ÃbÓÒ“ß„ßØ;¯ñÍ¢ÐbªŒ¯ŽNíLÇl¸û6#-6,`«O‚ 
ú,¹úV^3U=R‡55t²zâSÊt¿qAãÏe(Ë%£FnRjB]©IÀ@JóRcCw#‚}äY|bbbbbJޤ´‹¢[SÝ<¾ßw=,&-W”{sã0‚ \¿;›Z¡É*äªJ²4„Vϸš¥B™óÏÒÑ}Öá{oÒÒSÞ<ûçÏù½;Œ<’(¯¶Y¯Ôdnà´AæÖêůWõâ„(eö¥IAÞ'SßEstb‚ \'û¥ªaÉ‹­žAôßô¬øÝŸd± #¢ÓŒ+™¥º#‹;äEÑé‡ë9ÊŠì.¸·ØÃuÒÙ2mR§©wÞ¢“Ïþ ¸9E)RNù ù噘¢(IäÖþA=W‡W´Z|:öXù°¨ôŸÒ7»ÑaæÝŠ¢YÖlßM/ÞË‚,ñÄ‚ <–?*Ve,ªx—,~±c0AD¿ò?*ßžÿ®AÞǒʙ̻5× ˆî+«4£©~SÝ‚¶;ª¢­'Ýû’¾Ú;œ ˆÎ³®e¾§1YøxC?‚ :M¿˜®¨‘€Õ?R‡5ju²JâWimÕã¦ZPAê°Zô&‚˜t)[ùáÑ9c¶E–Z™ué[‚ ˆqgÒ”õㄾ€Vɸš¥Bžt|4Ac+ UñöÜÿ|Ž–y”*š­éu!}½{A1`[”´6ï}ÅzÔ0øŒ9!ž×œº€2üôÍTEu¿Gž¾œuól!xduë Èž\yQdæX œÐŽ ÈBNü“®¬*ypêžÂu\Òx­ZM½ƒ°EçÍ…l€e>hùbO .ží¡–òoùE¼/!3ƒü£tzzµÖ¬-8æ]z6P”–)©eº†!°íåa /%¯”vTaT` µoo\Deh;¸Z½xš®"¡ }uòÐ9m|¼4*cMÝz9›ò ~qòL" ÑÉ»‹Ñ{©`h9iÈ»š"¯‰€Õ>R‡5êt²VòWÏܬ‰Aµ% úÐîºýä¼÷ÅÔL }].Qz¡âó©oýJ`]u_ž›€"ß_Í2ôëV/‹j ux¥ê iqvqi%‡ŽF-„ï?§G_ya‚À®›#@BÈëê2–òô'/D m +Ô±…f:™Iù¥.‡iÔmB7€ÈS—eïìIÖ]ß0^·ñô˜µhêcmÓ´qoWšVäZõa (È¬ÇÊô»ç_ööCÕUdŽu(àdðµx”r’* òfå“4tùÌŠ¿Òá@IžŠm²” û™švl¥÷!ùöS¶Ìo£@–úà‰€y›¦ü=©‰s Mñ÷"«ÙW€Õ¡Èz“ ºMu«å·";&´Œµ+©[ÇB(FvBŽúuÚ§kÔédm…ûKró3P–ñÐÿȉ+Á‘qéÒ†Ûò Xg©à4ñœâqtÅ¢'|ïù—•û a#¼c£¢ÊìÚ¿R%X= $…²1ˆAƒéÑ×í„ìÒ=R±¬:6ÊÄ2p˜ÿ×&F兄иÜãk´9²ÙÙ½ ÙWO=áä.”'\ñ‹k:b½=¯ÖM©R4£®Þ®Üˆ²Ð³!¹Ý’i·.&XÛhÕà›¡Zn–ˆHÊ|ö"GéX¶‰‘*zó(€¡kÓê‡GÊÄr`ó9ÕNéHyIéo¸¬Ê¤có8 ËëlÿÔ`TNÖÚl~An~.*Ó/ÿ8zUP±¹çŠß¶öme¬ÁDqð¼Þ³ƒd_‘Ö]÷YÆ}×çÚ¶ûäÝIbðÙíÁg·›÷Z²õ§v‚j䤯TiLH§›-ƒº§!ü'õèkÇ*#_«ÚMl\<s‹Ê07Ö®à$9ÍŽoÊo¿ŸCJÞœÈp;À’S‡¦TDÏÍ»‹P†ùÝÏ&å)7/¥ØŽèeÑ&ÜfæwÓ¢÷m9-RdIÒÛ‚¤Ðî>÷Û–H‹¤ÕNé˜\M.(d•§Ú”\"®&·ÎR­kÔédíÅûËqó3PöúÔÞ bhõYºh¨“±óë”ÀOÑ}¯I÷™Ûüþ <¹m¡w'3i77ÎÚþ´¸zS^‡Wª`¨°U׿ {š,m`1h`=úš4')×ÂZ­2€‹ÜøœÓšLã ùÃ7bž ,r߭²Ú4¥j¾'l?º‡@EøÝNмœÑÒ«›i£ØˆÆ4êýËÁ9ÎÑÝÍzttuuýføŠ í^ÿûíäº~ƪzÈ1´3€ÌW™ÕR†mhc……•rgŠ‚Ô ck}N{®kÔédcææç! 
Y‘ÀÜÙJã+–ÀOÖ}€£oßÕ{áγ¾³[³€ì›_I>Ã+•¢¹#2o^y£êMeªï4ŸÃqòÿ°5¤b°8,PH겄”$¿QLÇîöÕÕ"qŒl2ƒï¥ÖH3†ŽË„þ^\»ùtß±âæ³Z5¥ ‚V#ûˆö=|8 ×iTÃFr®„<ýîÑS1-_¾sëúõ[ABOlœúY ¡Ž™K;=Y·c«S&®¹¡ -<©¤ÒGŸ½,`ëá(¬ë NÖ¨ÓÉÆÌÍÏD@RIŸ¿á3K`¥B–páÀÍ ± ¾Í 1­”äW—¨Qã¥()êeš¸¦•ËÌsþX ™~Û/¥T_‰(I~_\fJÿ³zÔpNˆ¥ejÈ{ëInmwdnÈñë9€°·‡QµßÑtÙS@Ò‘M~±52 G{Y(ˆJö÷n£õ M©ßnè &’®Þ, F»U*2)+y#ó3оäz—*|øë”ågêµÐÍKÉÈ/‹ s3ÒÒÒÞfæ‹kºtFÃqœ· €ôã«ö…æ*«1ÖcFZ’ß  —ÏP…á¾ç’žûÄ>ŸÅR‡5êt².ĝޛ²¸Ssú»ºtõ^+KY ú,djšYhH ~‘ÿ®Q…(ûsÖf ¬’qjH)мtðjR‹«,Ê,`ædÊ­ºÙš^!so-2|âøÁ^ª¶ ÓŒmó;h@ñlÓœmw3«¤:Uô:8ªœ²ÐV>j¢‡–óN!A%Á+ÆøÜt·Ñ&Åù 1ñ‰" ‰DAÜzQò«ØT¶¡&—%†žÝºîVšz­]ÐI§z×ÏÐ"fý4øÑü€ô°­Þ£‚‡õï`mÀUåf¦&¦ëŒ[=ÛùÃ(ÇjÐxçë#Èæ^ÃíxŸÔ”ªYm3ÏᶇvÆ@»ë¨ö•ç-#§VB$ˆ^íXº± ·%£Pa3t¼‡öçæƒ­Á¡¤žYæs¦òCmûßÿüãpÍjœ=×zÂÆÅá“7Çÿ~À¿úx´µ6ÒdC!ÎKMÈjú튉öuÈ‹+Ú}tY2ëÎo‹~ñ}‘–ŽU[a£œ“Nþ»¬Yí&Îݧ­]6@7jÏâµÇå(p{ýoA×W{þ¸—).5/FÍ[vùna¯W[7ø‡¥Wx·ŸàÑö%[®„%P4ÌÌÝôc#†4éúÖ…?ùÇU=wÑé¿óüwK}e^DÀŸGÏÿûøåÛb_¿Y —®ž£Æ jkðNúd™ÎU¯H^3ã÷§¼®KöoÞT½ü U’úèÚ…+·BžFÇ¥”&²zÆæ–ÍíZ¶nCtîÑűB®ç«Ó£wìhÔ˹ ïŽí™p!CIý'@Ü]ÐÝsËû³RÙ÷~õîõíј:¥”d†ôq#¢óÂÅZp“,z¶Õ“ ¼O$+h"ÑHã¿pl¢Dñ߸M€Ì{ä÷Ôpè0»Æsñœ<éô‚y¾9Ýçx}¸Ó’É3j?r¼ÊA£VÜTˆå<çáÝÌX4•h ¤Ñ( j½¬úoPE‘ríH„ÕhO+Nã±É×ý^l‡olwWô¶€Œ[XhЭ.7É¢¸[»ç/½×jÑê!æ´¢%Æà }½P¦ýaQ`i ,éüúýÿëcÚˆl“+àŠìäåGI»àƒÅ‚ÑÂ{„-}ɽºÜ,º·bÎÁ·]ÖŸØ<´)M5Zi4ê©‚úú—Bd^Èî-óÚrb¯ýéŸê±e}íÆtìˬ÷·=­¸µcþZÌîno&ä(Šr’_†ú9QØlØÆ­cÑ­ÑsS«Ë¶‹]™ôŒ–@ š«ãj‚,æèÜ9ûf”UŒhÙvš¼yÝH˯Kɬë Ç-¿› pM]¼æ®ü¡—Eã›Ó)rž^8r<àΓWi¥Û±™ãæŽ.FŒê߯Öÿ¯‹›_#h ¤Ñ 4hРQGÐñ4hРA;!4hРA;!4hРAƒvB4hРAƒvB4hРAƒí„hРAƒí„hРAƒzCOLPfÞÚ±áxpxD‚°™wáĸ&lšœå ³ïn_sèþ«7‰92Àñ§kGýÿsøòÿ}¶(:/,Oð~ðéþû'ÒF¥¼Œžñ +8µ(hÝ«û“Žz{$ Zmÿß:!–qù¿uÿöÚw}VFÔh=Ä‘üðý¡»i»v|ç(øâ§áÈbþœ;ÿèã´ —ò ì=×XêRví¸äù¶)sN½•mÜÕ´ì³|ÿÚO¹~ŠiØuþήsÞž™0hóëÿ¼PdN¾èašèež8©H^zA2“Í6ÒÒ°1ºZè¶ÐdV`/§{÷;gÚzVöéßf4iáx©…ìÄ‘ãSëÒõø'¡Ž×rl+x/¹(}}PÊ”‚ç9€¡­­ÕÑÚ|†[³¡†ìj¹O>Üð¿—_ç¾W¯‰Óˆ ;§·¬p¾´<Ùù‚ý!q9¥÷<3…Öý–ïYÕ³.‹ohüjð_UÛÿ¨ª•ˆ»r>¢ˆÄSÿ¿&8:~ñ븶>{|¨¼À)½—…M§>=íÛìøNó±^2ií§Å7zÙ èé0K  PIDATO³ÄÌl¿ȩ̀Ŭffºí›™ÔæêpYlŠ,,‘&æŠÂRҶǤ™[˜û´Ò·l|‹bYnâÄë9’ÿ(Í}»õY¦™}ó_ºw2à²eâQ±K½ºž{^Õ 7†¶ë²“ËÄOÖv!0Ÿ|Êo¦ÝGqšß|f¸"騄á»uVìñ4n(©jh¤AC•ëÿáÙ×ÉThÞeü`ë†S‹U:Z“Yy.(O»úÓ”µa­Þ<ò«õ@dÆÅ©}f}†™´R³âq¶ÈÈbEïKÛ™õm"´Óákpô<+aW»&óºµ\ÖZG–šº.øm´¬‘‘FY´óbÔ½ªîå, m9»¥AcívMLfôéx££ 
>ùw‰êF­½‡7€´ g¢ÄÕÚÿ‹gßhõ˜øMC†u‡Ò ÑpN|û ¿_¾°}¬-¿Ò@‘¸ú»Ÿ‚mçüu”5¿>c_4î!‹¿pôi>YÿHQrýQÌÑ·Ì>®ö õšrÕ9y+Ë&K:èëeï{žŸßˆŽ$TF„<ýI¤ß[«Šgš††ö¬ÛXmœ-lH3½UÔ°Ân>xl+Ƚ~âqA•?÷½˜fä9¶½°Ac`[èpåÑ 5b(3o­ûnÙ¦snc§QßâÇb±+‰ŠÃŸMú ‹ YðÓ¸ó¹Ü¾®Í‡©H“¼S_m#³É–lIFz`A%wÈh(Õ.N{_1uH‹ž`­fmãÇ[˜2*­4 @™"RÔàÒY¦½&váß=~7ëãSùOÜÈ·9Ò¶ý_¾ ÚÒ¨9'DÉÒ‚Žì:t1(2£ ®¶¡™e‹®ÓVÎê¬_µ=’§ø/ÿ~Ë­T aæW=û3“gQvŽ–ƒõYŒŠÖ57ïz\nDž$[Nà›5ßÔN‹Lëæ†fIéS$Cuò$ ÈÃ_&ìy– JW2ŒÍÌÐzº)› *5"ÔæRŽtÖô7Šzþ60­8—„†¶Žg[Û LmU”Á)Å\ýR,4uô¾íç²Ó–[ùkÎÇÊ]:­mÊÙ[åòí#®S$YzGµ&ŸU…˜z&ôÕ½¯|râï”þ?L†‘™wŽß—µ]æùî^R{ãðþ×Deˆž‘CÇ~ãfNío+`ÊŒ«+g¬L’°[pl;âªïiÿ«÷³¿Ù²Z{ÿòãÏK/éÏÈÎmÌ/&ÚrÉ‚Ðí?n¹Ÿ#ÛØiز­‹º|$ dîý-•Pùö¬ÑkŠÀrúÉ}]/ûÞ}“«äµê5qáÂÑŽˆ¿åâ•wCŸ§QãvÃ箚ӧôF 5õWňvû/oï j(BjRcûµP[ ³"sÿ]5aþt¼6ú^ºrñÔÞ•£›½½?¦¨Ú"§ÉÀYߺ5‰ù§o^ÜY¥ßyñA¿ÕN?jÈÎài÷²7´gS™oS¿÷º#À°pv˙ڢ5ì}^¬×ÔbQ÷+ c‹â¿ °6§ã£Ut•%˜ÔæŸÝ­W×ÌÜwUð@€âþ§;™Ö'ºêªC¡¬¸(]ôÕ˜ œ¼‡[@Ì™ó1¦ÂÉŸ|Æë2Á£´ ,|²ËÇ{ù‘'z#Öž¸xåÂÑÝ•÷Žý4nÆÑ)–É€õþ{z dÜÝ=kÈ€i¿^~ž–-‘‘­gÿyaß`]0ðÚ{þÔÖ‰¶\LŽó÷>¾¬G³ÇßÃ?v©jŠÂ¬BYfC÷ÜÜÚC éð4ï9{nÆ)ŒÚX !ËŠ¼ºuêxoÏ~Öø>Êâ5mÝÆJyæÓÓËù&Êk£¿*Fªf‚Ô¨5©IÍí«­¶4l%¤H½öG`~\:©³€‰EóEù–UŸú&óC¶Íú-{Äïû½mTfW<= °›ôËÚ1eÛG&ùŒükÚ˜‘·¯½äñÇsVí; ¶ÊÝþ¿Ùg’`Þ¹½U=Ô¬J_^²ë™ Ö3öný®üŽc ¾H¯`àÞ]ºó‰„ï¾vÇâ~¥©jãAóvšËFþÏïÆ/«;µß5ؤö£fi7m*Òá`h7µ´ÔªÍçª÷AéÉÏ•Ì6fïWŠð±Št¦v³i#`2@¥¾.¼Ÿ¦:"uL¶9(‘’0~ß:oÄà.åû„d¹ñ÷F=.~{0ݱ»€†€§ ‡´›£[Ê›¥D\·Ñ£§F˜Þn¯Q¥°’â¬y§¢ß¶wý×U[ð᣼„—ãž°—}këÂÔ¶Pé)9)ô-F¨³ØæÚ Ûòð¯ÑHð˜²ê]Ñ? }ã&V¿ï’¥£‘Døé¯XÊxÔÖ “ÛXôŸ·>åÁ¨ý‘lûÇs×€²Â…ÒßJ­Æí¹õ{s!#Õwê÷1 hå5À8àdfÎÍ Q³Û—”Ìö.ðø½]m“NL ¡€ @§ïo—n§Q:vqäÞ “'*ò›ýtýŒ»vÙ'òîþ4bþߢ×WîeŒµ*ݨžþªQÍQ­5©‰:WSmi4ÜJHž›€"ß_|Ç2ôëV/‹*Í%ys|áò—µ»f´Ñ®S°Œgã5g .  ?}3UQë¨í ïš1ëd<¤Z¼énî§.¬ŠÃù%Lbò0kwxŠ_œ<“htòîR¡XС值¤x"`ަ釉jމs Mñ÷" ÉzµšŸ«n,. 
/GOç}žƒ”Üx)b™»”p(å1Åx‹Š%sY¨Øl•eL-(”ªoöe;;èpdg…Kªp“~W®e:\ïgb¨œË¼~FÇá„«Vmj¨„çQs`Þ¦íïv\u¥˜©ßyBoÒûÇÿÍ$ËINüSä8fpóRã&OòBÀжâ%Øl¡™EfR¾²¡iãÞÎ\«þ#ìù=y·«Z™~÷ü+Ã~Ã4êËð´x PTœ0¸š\¤‚¤j¥¿*F¤AT+‚ê§ê´¯¦ÚÒhH'Änâ9ÅC ?ùã{Ï^³7¿+ªÒj‘…ûç,½ž]•VòIŹ\CK]dvJ²¨ÕBH»÷†S;&mÇoXî® $dÝ’ãïãÄupB™o20 ­tU-VÙ1Ù e¬]ÉŸ°u,t ;!GQo£VósÕ ¤°(01ä—qi~Np ËÉBƒ÷>8'‰—€¡)0¬øRž$[K WŠÍÓÔ4@Ic+É%¹ûht¤T”]/ÿ(¤öò…Ïk­_7³¯ã.|ãu5›Ý¼Õ~ÆFµ‰ni¶;Äqêj¢€2ãöñÊmB/Ó²ïËó’ò z½§«K9ÜF슅T¡†Ò°-zrf%Á~JKñoo_ˆ54ئñlþù@UEYÔ ˆjEPýTöÕT[Ÿª‰Ï2î»þ8ïжÝ'ï&HƒÏn>»Ý¼×’­?ø þ‚Œ¿^˜ÎhÆG|nÀºCÃ|ç:Õ9ÍÂ`ó9 ËH€«fj…¦}Gº³°Í®Z÷Ø{þÕ\2j×¢=mŽÍm§U—V)yQ±[ƒ§Òæ‘ò9°¹•k²läb9¥>Ùkr¶j~®ê1ˆä˜ÆZåÉòmzQCÃIë½o‘gúzüŠqEqa¬œag!Ô‡€2X¬ÒЬB%UqGrdøÐ´à º8yÚ=Ëç½tËw)DiÓ®f}ºNÕ«µÄÙ ^'^¿iæxd3ÇÚÚ$®Ípo‡¿¶½B¼ß¹×cÚ¿½zò¹v¯=îïc2± æÿµÉC§2;8BcuNÁcuõvåF<…ž Éí>ÀL»u1ÁrØF«FJúPUMÜÔ ˆjEPý´æö©"õÔ–FC®„0xMºÏÜæ÷oàÉm ½;™H»¹qÖö§Å¯¢ñ{ý¾¤#H;±ödlÝwÌSeÂÃ×â1Õï@‡¯ÿÍâc- ùÄ›ë˜b°x…D®²WÌÒ¨²Ê‰J.‘W“˨·Q«ÿ¹ªøP"ULa¹9¦”‰¹rðøï–*•ƒa¦Ë©`´©·iùÙ>FìzÙD)R`ê|Ø^A&káX÷¿ûj ¡¿e–sŽ|ñæ’DxùgÝFùnÿ˜ /nþË]wEópÜë ¼.ÎŒv42Ò¦ÍãQÍër**ۼ™WN>Ízuî\‚éà1m+$j¸¥åê ž¹Ee˜k«çõ˜znÞ]€2Ìï~6)O¹y)ÅvD/‹Æ4¯¬¿Õûmµ¢ZT=­¹}uÕ–F;¡2pôí»z/ÜyÖwvk}óâ«"ô–S~žà eÚwñ G{`ýùäºæX¤9Iù¸Özlõ;ð 37­ö3éÀ{µ®É!Ž®…™›’¯*Á6´1€ÂŒÂJ?S¤`[ësêmÔµþ܇քÀ`—§­Iyª`³ùïE†ÊÌ•œfšþ$]JR:8˜8ÔÓ×’bq6¶ Õ‡‹@».mš²-[µþÅ @áš«I1ïœ ©¬Ù¦ˆeä»ü•—ò¦ïÑ—m\ibWWƒÎÔï2±—@á­½{v]ʶ÷f[aR–À@n|ŽüS¤UØ~t€Šð»x9£¥W7ÓF5¯F?V™ZDµ"TõTöÕT[ é„d ÜLÏßfИÖJò%,8.ÇÊké$K@ñû¯™uZTH‚ß(¦cw{Mµ: %E½LJ¢ˆo?aÓRWªL1XœÒ ºªéÏÒņ þNDžªš0s7B@Zx҇ǒÉÓŸ½,`ëá(d¨OöšbDê}®º1q™YXž¬ H‘ ¨òÜ4%‡å‘Ǭ|9EÉâÒâ ,¾mÊ­'˨|+’L3Ó¶¼ÊÝc`iÎèoc(R_Ί”(€ÙÚ£;µÜ³òK»m2€Ö½ºË–{RÓm[°Pé1Qßœˆtu¿ÛËÈôS’X ­¶c™G]z,m?¾ïW›pŒl2ƒï¥~Š‚ ÕÈþÆ¢}ÈuÕŰQ¥Ô?Ô_UnE ‚¨VÕOÕi_Mµ­/;C£.NˆE^:x5©•E™…ÌœL«Có[LZ:ÌoÙ~/¯Ö#sCŽ_Ï„½}<Œ˜5w€Ì½µlÈð‰ã{m|\ø KjN“¡kÖô £v-Úó¬BÔ‹¥ejÈ{ëIõ±:¦žÛÈN|@¶çðc5‚VcFZ’ß  >š* ÷=—ðÜ'ö±`×…ì 2?ãý–Võ>WÍt…@™V¤,“@\”(£@)ÓÒ3v'…H&KƒY®“>xï§0šÓVW¿žŽè!‹³¶FJÎhwójÚ˜ÙìkÇ”F_×JÈøÈðŽgR›ts=ä¤!˲>ü/[BÖJ‚yö#¼íZÝ'VvšN#{êH:²É/Vò ²Ê·:¨ €¤«7 ˆÑnzÉUÖ_UPƒ ª¡5Q£}uÕ¶Þì ªC65¤Y5ц61ãÇ‹o‰7ìÑ~ QS¦¿(ùUl*ÛP“KŠCÏn]w«M½Ö.èô.—¨²Òäûa…ú0C2AK)äʲÿù¸‰iØ}馑ÏgœMGò‰Å›Úø©›Q)m´œ‡tÒ * ^1Æç¦»6)ÎÏHˆ‰OPH$ `€©ßmñÂ.ãÖÞK?3sdÌà!]ìôY%Ù‰‘ï¼E±D 0®íÄóž}÷[Èê¹›‹Çº[ò‹“ùïÜèŸ 
£~?¯(¿Á¨¶d8FN­„H½Ú±tcAoKF¡Âfèx5?W¥‘ÐÑ68-C\l©£ÍX\[M<ÿy;Ú—R–05=Ûš³ž%‡+ }Ÿ&[1å‰y2ý¦?¶Öþ4³(É(Œcñ„,äåfí¾ñüœ¶D»í6ÜêåˆÓÍ£õð¨0IÚô;–]ûè¨ù)EÑÞ[©I’nÞ·¼YÕLZ'L¶²R8ì&ý&tØýÓ#½ã>:°”¡EÌúið£ùéa[½GëßÁÚ€«(ÊÍLML×·z¶³ÚEÖÜfžÃmíŒv×Q |.ªú«bå¨AT(‚¬5Q£}uÕ¶Vv†F­ƒ”Š}Êìà£{|o‡EŤæË0´Ì»õ™1¾‹²„Ó+–º“K`è·½î÷„Tîݵӗĕƴ­{Œ¤vé~t\féY÷FÍ[v™²fI_ãüË“û®z–qý¢´,1 €khïÖÜôïúÛ—U`©ìH^3ã÷§¼®KöoÞ´: -‹ùsî¼?¾­pÜ>KhÞfÔæß§·¨ä¡J¢ÿð™p ¦ô_Mz,Ù·ÁÓ”2÷áÁ_6JƒkdßÞ­[ÿ¡šÝû~ÒŸ©`Ðcãɽ ˜ doCNì9tþîó´b\{·~ÞãÛ†.Xt½×q汃“m¸d™ÎX:Vm=¼¾Ÿ;ÚYȬùsÕNÿ¤×î½¾P,ßÍú qnúŽÇY àÚ™±×·c—œ¸{WÂÐÔÔha¢×ÕJ×A£ÒTÚËÈQÿ¤ÞÏW_àî`b€…ÁÛW^W’ÿ͒ʰø.ާ6i&Nqß F=^A¾¤À431×Ñ~Eka©I“æ$Œós!S¦®k‡ö÷1У¤WBdžt Í6j;ícOHo>pûÇì W²*D‹ömUOÚ:!€* ^>j§ížc“›sªœÚG_>|ðìGÑb]ËV.ýFxv1á’Y·6/Ù~-æãäóGžw²“˲A,sc£å#ÚÍÔ§—éeôɺ¾pÜò»¹×ÔÅkîÊzY4h‘0­¿4¾z'Dƒ 4htNˆ 4hÐNˆ 4hÐNˆ 4hР 4hР 4hРñño± w6tIEND®B`‚pytest-2.5.1/doc/en/img/pylib.png0000664000175000017500000002012412254002202016173 0ustar hpkhpk00000000000000‰PNG  IHDRšrë1\gAMA¯È7ŠétEXtSoftwareAdobe ImageReadyqÉe<`PLTE ‰Þòïïl³šœ¢bek<¦ìÔ~  ]cR ÆÆÆ•ÊéÍ!"Lµ==*GR¹¡¬±¸×kkïÉÉiºíš "D;Håä冃ÓÖ×>Eþûû™ÿÿÿž´œqzIDATxÚb`cfƒa6IÅ@ ürrBÌrrbr $$ØYˆA[–KV\–ƒOÈgb11€bà“åä’–e‘—““‘“àg’b` ~ fYYY9&!>!9> 91€š)+/+ cæeÁË@ A9aYF¨‘œr ,ÄÀÏÎ''$''.Ä, Ç$ÇÍ"'ÇË@ ü i&19AiFa ARRòòòÄ &“㔇@ ür¬BìF” AV Ù¼pAZ€Z$&Ç Õ+ ä‘  €và åòÄÀÈPA0-++@ lÜàP““\Àpä– 7Øå„äÀÄ ÅTÍ"ËÍ()''%a 9Fy&Y9 êàúYN €„A"’Œ²L ¬ìB@c䨍Nbr²ü@CùÀÆ@ ¬ŒÀÀ–Z²GœIŽ €d@V‚¬cYÉ$ÇÊÄ@ À€>9 iY.yy.Iyy€b`á”b…xEÄŒi^!^DŠÈË(ò¡æ„„#@1-—“€ŠIÈKÅøˆk°ð—æá”áä PX1CÅ„AÖË(EÉ °@c™è((`g`‚²YyY¤Ž]‚]‚EJ† èfA&&&r&^ `*ã•“c 6>vPÈpC˜¤ä…Øee9Á†0 ðƒ@ˆI€]ˆAŽ €€æ11•HHCQVœK–äfVVV +€JÙé– €€ê„@æ Í—+Î'Ät‡Œ8帙äXä> : pØ ƒL…xê) Bq€ªãÚ+ÀÇÀJò²ò03ãh/0‰ˆÍ  :AHb‘æ”ãÊÊÄÂÉ ^^ ^ 7Ø9%¸€Î  :V°*~v>v^ )y¬ €X˜À¾c`ãàB’`ç !Yy°N€bà9H† l1²Œì( €ÚËÀIΨ6Š‚Ó /4Ó(\XÅ„$P$L¢< ³xxxy$ˆ_†W‚T¢ ©ãáa—“<€ZTÆ Î fZ Í àT,îFs31°H*c`bà&}PЀ 8ù ð $';@ 9)°B bˆ™„Ä€u0WKD“xÅ8ehÔïÀf9~&&`þ’f0v9NV9^)IY.€b“€fCnN~q9^fiiYiaipV`faá–òÄÌ‘U,*‰5+8QrËKq^ i„Ø6˜@1ðÌ«Ìb(ÙTzJJ@ó@1€Ã\†IHHˆ— E,Kò€œ@À:Ô  áEÍZ° ÎÒ @À\*ɨbˆdv&AP.“ ˆi ™ ÙXN Z¿I0Éq`Ìj *„&ÊÍÀÇ)ÄÎ@pӀƢLf°Âd‚–pâRÌÌÌr0kX9„¹µ› ÌP1>Np  €ÀUˆŒ00H0KKJJ ó1±2ƒø\œÀ¬Ï-f AvN¨E |¼à“âAH`g 
`‹Œ‰TxÈJÊB°fKƒJ7)PÅÇð+P,”%€µ7+(Ÿóñ±ƒ 072CL“ebeâ3¸äÄA"òÀ+2—›Pâ vƒœXˆ ˜= Á)t@ÂMœU¬WhÈ) ÒK,$ÇÎ jÀq2Àjm9vqq -B   ™è&>°±ì,2MOæ9v˜iÜÌ`¿‚\-ŽH°[8„Yø@Å‹(A]®oÙÙYdš„€ Ä§R‚,Òàããå¹J$Á >N&V I¼ìȉƒWH‚ïÂÀ`g H št˜”†d".ˆ˜(ÈɬÀ²Qˆ“ ¨—ƒ[Œؤb‚§?&!`<€J[v€B6MŒO”KÍ3I¨¯Aõ&¤d烗­ply€Â  œ,4ä ¼àR\A€¶,’È­"XNàB€$q`Æbàc  i| BPÓ€‰‡•E˜=ÒàÖ8HU·,ƒÈ s‹±2CŽ”,€næc  iìÀZ(.È ,Y98A)ˆØü–ee09¤Ä…À X|ALeMH¶Öj KÙYhš8P† %sCJY1>H;—U€šG9XNe'`‰Á ,x€9UB€“——EJŠ…— €@QPbâD2‹bLàÌ(.Æ ,,Rš™K¸¤$89M ˆ”4¬¤ pgŽÙe @ø©XLòñ²pÉ㲌Œ(|€bà@Ä 8jXÁ%¶Ðù,ÌÂÀ…< €€%++Ì£bÐÀÌÇÉ"ŒÃX™ÏÆ#Rà p E02€€‰W —õŒ<ð*Tžü<20‹ˆM°â– 8Yq›&Ç,V!±ÃªE€úTj„ 1†Mã”F÷»¬Æ `Ï\zɰÂR='ŽÆÌ4FD-)' I˜ßœB˜€å+$CòKñ²à1 h„„,<>8ÁƈðH@ëT€‚”!LÐÒPœ›I€“o¢áa&aJò@€”<@Mãå,NglF|¦ñ³ C< °Ð2y…˜ €@¦q3±3!•¡Œäü ƒ€i›‹‡‡Ÿâ,^6`?…Q €€(t”¸¶¹¯Oº€YÒ’dd‘`fd”áä„4ˆÔ©aææfà ¤1D Ý iInnfÄX»¤ ,@1ˆƒkiô¦HB˜£LGâÀšX !4k/n>^i€‚˜jåÀŒã–€6íÙ±5‘€© XV  ‰ ±€ê N.€‚™,!03´<UÞˆŽ+j>Nt€,JqÜ4`æçDnp‹)~N99,Ž9\ßÃR²rrÂ4v>p3 ØD,.Å,Ì,%#³–…X‹Rq¨ˆ$‡sr4@X´q lqW‚ l0Ð$AP'Xÿ2 ÚƒÀ¦%¯°p†5ß À@ ,`ÃAM41>a`óKŠIÜ|vÛÄ…ÅX@Lfù¼PæPûA” Í&HÚçä `*%Ì nrpKIBÂr¼Ò`0)0 €Iâ(­R&DÊ5(¤€z âä `Ü21A0°Æˆ,‡¤¥l+sÊHÃ[0Àe¿ ,ªÄ@‰OØœæ$.€b–ì Ìz‹Þ¶›Ì L à†"7"IpÛ›LÐ:Zd'»È‘œ\n²‚M“d``„6d„Á /Pú7$ঠãFÒZ‚Œ @Ú–,L Ó§2P·‚AêUhÓRŽÔìbDÊwÐB˜€Ø…ÄA!'ÆÌ,2Øýä ib|`ÓDA)Ü̵ÿ¸ ¦‚= lió!L÷}€YØ–> GJM i¬PÓ@)SjšÄ«à9¨Ù&kD3€.Ð_ìLLІ%¨y* 8¹l/$NA‘$5C Ì–Y#Ãi‰K“¨Ý ÊHì ,| ìHnæä  iâ &®<¸¥ nòsˆ1€ƒ_X,,€ ξy‘i1I0»Bˆ†?'@Ü&À‰I)1pK\XOó²ÃÛÂ–Š¨…!À‹\|“´  ÅqrÈ4!9ˆi²Œ\†3#¸H†šÆ1 Ü‚h•â@ä PüS°(…$×CMƒ6Éå!Ý(hC]Ò¤•@­BÂÀæ)ð"z˜ `§€B2 –³äa¥!´…éqBKl`ÞáådgG*ÚYY‹‚œ\„l¨Ê–惕p>—8@Ã Ô E-xIX£ƒ‡m8¹Å4pËn˜8åÊK ëeBk¢ƒŠW°ùÂÀF @1€‹dˆi …Òèv p4H£T(2 ¨U…8Ôxf`«’— €ÄYànæ;hµ %ÌÌÌé/ Ô À„'RK A²?¨½Ï$À@@Ÿ# lš$°äưÌgŠBº\¨¦ !õŒ€.V`Ög5‡€ú8ùx¹hš ŸÄ4>P¹*ó9A`—1Ê¡¸ƒðüÏ ê[ wvvpWXLqÄ4)`Nfafv^Pκœ ˜^EA>·Ä¤¤ Ř+¬G* lVÅd€‰‡ƒäN.€bæ01„Àj9 =pÞ–6ûù 1-ìÿÀ­v`׉  K˜9å$¸h°÷ÖO„4¬9À£’BÀz”ð¡£oLœLÀÖ°Y ôxH‹‰صvh¤$x¹èS&`Kš 9Þ!c"¬ & êÛJ°ƒ[ŠRÀF#¸£$j‰€ª@A9v`%Ì, Lš’ÂÒ4M4ú"„’ y!Ý20‡ܽZ.îfI ƒºYÒÌ, Æ(¨¿jÁÁZw4M­ÓÆÁ îg1°ƒÛ\À¶P7°ƒÉÈ%ŒÔS’•– U…h+(gI d~1P$=,Ïy%¤ñõÚPÛ¡©³â˜ß€NÖñ _€¼AJ· €@¦±#ÜÆ ,ö€™XØ ä”bæ’d$¥ (@ÀH\Þ‹¦oи ¨C ìOb7 !ÊÃ/ »ƒ€5*XaQ Lw¤ÂÊË+%‰«QÎŒè[2ƒ»Jˆþ"@³>R4쬼=J\^äç÷NÄ9™Á!¸ÅÄ ÎÏR 
ìžò1ñáî·ñ#zmÒ`+eyàƒ¹ò,-‘zÎÀJ€Yܦñ2¢w¦á^ pMMµðŠˆWw? Ñ„›h 1x®#ª·ËÏ#!7 €àÍN9¸ñ9 Y/¼ 7 €À±nw²å4 iRcD†@ €$@IT:‹!œÆ,O”i ®,(‰pÂR@KK`‹A\þÁºõø:¨<È©Üû”DŒ9$Ÿ‚úTâ`‘`Æ—ÑyàSÀ8¯$¤Ì¶ p è8èX°ôæ•Â[j 0€=q)˜s¾Š¤åŒ PM'Ä ,½9¹ðP÷€Ÿ‡Wjš8¸;.)@Ó@ÃË ~°_(Ä)LÀ4Fh„òÀâRÜÏe €›& ̪à1P÷‰¯i²Ä%v ,¥ðð²°ñË€™Æ ,µ»”<ÞÁyYð@/Ô4ðhˆ„<8Jh?«;RË¿i"0bç•`šÉ&$Øyx„¤øAS%ò4 ØßV[2BÐÞ0'^Ó@FÊ.Yˆ£@lȈH@1pÛ6œœ¼àá>PØñ ã\VìÌhe ‡0KË3ƒtÓ@1³ )|cRò\@íðôjEƒNX¢ €D8ø¡@†…IXÀù· AEÀæ— D5—47·˜˜  ÐÅ22XT²{\ ËE¸@IUX €ø‘úØÀþ¨@‘@³á@™›öô! ¤Ä íg|@ØÈa’âÖ­à§œ=Bì| *4¨ ŒH.€BqTÕq2À– Š X@v\Æ@Øe·†òù@]9üêÅÁî¶mÙN t§AÌõÐ\Æ‹.¬&™A­Â.ƒLo€ú:B„óƒfø Þ9 €0‹X‡€mh XL3ÈH±òñb #!,.`‚¦q~fan`‚d‘ÁÔÊZ¨oAN d§ Á€0XÙÁvc‰M4”Ë „q€­äƒAvä$,LEÀd$Š/ µXYYÄ‘}!ÎŒÜ)9 €Ýi Dždàã5ÀASœÌRRR`sÅKX˜‰sp1#i€PI„l± +8V}(p°{|àîB jÒ9 €€îg‡Ì´Û¤°Ñ-`K KÂZÚ²Œ’œ@Ó ä¥^æ0y€&d ¢ð—cGÍÔà䆔 €}9 ã`#2 , …ÐiZð kPsj‡¤0ÜÐÁ3a }âÂp²ÀØc—,0µòlØÓ77¸fe”G- @n–q ñOµÀY¸ˆŸCÔ|LHN“…ÍH\.߸nf&PiÀ X–ÅšAÖ Ê À(B9pA’ä5³y¥ hÜdá PÉ jVd§É3CW’Hn ‡pH#œ íõJÁËÊ„¥Ü•U| n:;°ƒîD ŠP¸:^ / ÁFÀÃ, ´@ ,à‚P\ Ùiò¢ìÒH)y1‚¢‘®€ <1/ ‹N1kQp`†¤ d ‹8¢ß) N Xá!ÅŠ¡ W€†JA=_$·G‘¤$Qœ*s«<6·±°¢Œó#d.(šA9@ˆ“šÖÅÀ%Ÿ;dt ä4€‚: ˜Aø$Nƒ¤p—BD!8•³3K"øB @“‡:œªá“uHN óƒK=vp而—€úXoƒ8a³Ï§ÔiÀ@@rš¤84ƒóñ¡;C–Måe98€y@Îü%ƒä8q)HÖ­’äƒvàäÏŠLD Ãj`šg÷ §ÄiÀ´ÊÇ+ƒ”ÖD`:dœ&Èb„Ò‚RP6È٠Ȁä8H1ÏÏÅ .ø ™2® ­ AUØ…è H u/@§ØiÀtLª2È…0²8¤˜Y$à©Ë õ. ¢Œž'%¡.…'j>P6dÖ\ lna«Ø! @éYµºW¤ §,prÊ¡8 XêB‡qàiHR‡Œ4jy K ôŠ p!dòr²ãno€e@Á/rø¥@• ¨2§5€BªÞQ) Zû(Ž9¡ @ 2¼à•hõ€ ¼ÿº%1æªIMòŒ §q¢OÊ2KðÂC l³¯ìœRÒ”%5QX„Æ $$M YQx­Ê‡8Äi l¼ ‰N&,Nãä%Ûiˆ4q¸!Ê.bQÄ! 
NH™]ª@ §‰CÇÐ@CF¬àµg ¡WXë(²#êp«]Ô‘€–kÂ2ÀìÅî<ƒ;>\âPgE`"@1HÉ ú„Hý\8ƒüøå€Z ›QšK˜Ô׃.1d÷N‘ÑÄ +ÉEß›v‰¥%‰îß ª0¸'éwõ×jÂIEND®B`‚pytest-2.5.1/doc/en/img/gaynor3.png0000664000175000017500000005477012254002202016454 0ustar hpkhpk00000000000000‰PNG  IHDR,£&lÒšsRGB®ÎébKGDÿÿÿ ½§“ pHYs  šœtIMEÝ ;ÅTù IDATxÚìw˜EúÇ¿UÕÝ“6'–°KAd‘h bΞé wžùÙN½óNýñT0"¢H’³Ä%Ç]–Íyg'twUýþèÙe ‹,¬ ^}žyx†Þ™š®·Â·ßª·ªˆ” …Bq¬¬]»ö©§žš6mZÝÕ«(Ž¢ª‹B¡P(~+¨2B¡P(”) …B‰B¡P(J„ …B¡DH¡P( %B …B¡P"¤P( …!…B¡P(R( …âXÑŽá;\ˆy»·/ËÞUiY.¯Ç …K‹Kª«ªÜºÑ<=½²´Üçó5KMóºÝfĬ®¬üÏßÿY´f «1Gž;â¾ûïëÔ±sQaqœ7¡¦2©² x©mèÄ R³,;ªáÔJÏH«T½ôÚóó–þPZ½OKÉ¥ )@¨N¨‹Rƒ[ B#’ù|1=ºw¾úê n½ýrÃÐU¹* ÅïS„*#áwÖ,Ï)/…·[×u D$$x]n—aÄÅÆÙaÓ0 )„eY\pFÓÓ™¤¼¬¬p_Îîž§v‹ñ¸¸öºut;F lΡ±šGê®P(øÓêU»vïn+D{WÁ`xÅŠ ?-ßðÑ_~9ý-›©¢U(ŠŸÆ ÇÙB¼³vya0à2 ·ËÅ“R2Æb|1I‰‰ñññ†ax<·Û B„”R—Ûåu»4MR˜¹9ÛçÍþnËÆÕ1%Ö¨­1Ûã&ºÎ µ SƒnRRQ°lå‚ü⽦Žjl@‡@híE²~ÝÖ1£îŒDLU´ …Bñ{ó„–æîɯ®b”QJ !„nÛ„PMÓ¨®@Jér»]†¡ )¥ ÔãqQišŒT—®ûiñ™i§uïYˆP©{ôØx_JÛŒŽ’ƒ2"mŒ„÷æ,Z±xåºEa»ši¶£/ €tdçßhÿÅ ë·þ÷ÝÉwÜu*]…B¡ø]‰ÐºâJ)! „J‰”Âæœ‚1J‘\ØÂ2 ÃãvCHnÛ„RéÒ5ðDŸ+ÑëÖe(kÕâüÝ[‹‹Ê‰ÔâRÛfv’ý q©noLu0´c÷®…Ë?÷û¼Ò<·×ñl•y¨üDˆÔJ!Ÿ6C‰B¡PüÞD¨,Ñ(@)Õ£ B B)e”JB„ ]Ó4M¶ÍïHéq»cÜéL¶MOKHˆg„ælß—[ Ù”¸¶­_»msÖ€Ct·wÓ¶ËV­ÌÚ¶±Êªvëîª@‰ÐP@Ô“Z+BÔyï ºyã.U´ …Bñ{¡ˆe:º"¥D­öPI(!$:XF`Û¶‚Rªiš®Óæéi¤Uz†A:·jÑ,55ÆëÓ4¶sçîõë6å啘á²í[×lݾ©°¦2‰„M;l›’jè\Цƒq†?¤€„” ¢&P£ŠV¡P(~o"d›£”2F %RBHB)¥„HH) @# œsF(Ó˜¦i¥IIIÞÖþiÝÛ¶Žõº5&ÄÆÂ.ÈÞ¨`¬:4…Å#!3ŠpÎ! 
¶j¸2*9ŽD¢/Ikÿ¥õ¼"è) …â÷%BR¦ëº®SB#Y)!”PP€J Õ(åœSB)¥RJÓ²4FOïÑ£OfóÌÄ8p‹>'#-¥c›ŒpMh_Q¹à–€­k –Ü&„€Ë6)5¤dRÔ=¨õ„$©½(J@Që’) …â÷%BÓ¥”PJ uœ J¥T£ŒFGǂۆa´mÞ¼UZ Š ŠÀmÛŒ**c]®äøØˆ%2Ú¦Dˆ¾jÓV–”Q[H €J©D‚ìO/ª:€”¤Ö÷¡”HB(‰FÐ) …â÷%B.]' ’ HêõMà Êq!cBPÊá>Ÿ/Š•˜Å…Ù;¶Á2•åI‰‰!-ÒÒZµnßñ4@ÐM¹ÿÇËÊ Ó„´¸”ŒiB@ЍðÈý³AŽ9SA”F‰F¢BpDHå ) ÅïN„‰.rüç?ujÀ!RR75)¤äB®Y·a_$˜H¤!E¬Ç“àóžÚíT RŠo¬/­åÞ²J¯×k Û”„2CJX–Ũ.$RYß:Œ£³_Ÿ ëÇÑ5-æ¶×Ç^ýA€Ì;&~K[µAB¡PüŠ"D)ê)P&9úÄ ¤¤´v#B$€ìÔ¹K{·'8"!ŸFI$—hs!Y$D´ââ’u·Ã0Mr„ $ú"„Hî¿ÕŽÈ A I½í°_ä±lÌ\û·Knù²ÌùOæí“>¿µÝ !2vù¦Ù_|ñíü•Y;òª,0ZuèÖgÈcÇïÇT-V(ÿ#"äè 9úWdt6‡8:ä¼Ñ4£° ¯u«æÞø«J&Å'¸ˆàÎþr…å¥Û Š'ùõî½ûÕtÊÂçBT£š”š„‚HH"¤3ÐF k#·£@ä°ÑÛ¡&kꜲºÿå|ýÝîîêdü¶Å##Ù3žÿËÓßäèÙ™¹›çnZüå»]®~阢„H¡Pœœ4Îc $ºq&ˆ­ÛQ§–:Y¢”jºn¸ÝùÅÅpXs¹%aPPPBiÛV8¶…]\Z ‡-Î-.¸Rh”0 ÈÞŸ]/F¡.NAÖÉOýˆíÆt÷Õ뾘Wq™>È›ñÍŽÈo[8¼hÖ7¯S ¸ögœ{ñåW^>úܾíb£j´ù“{ïy[ø¤«w‚«i;…BcØE{¿ÕjóŽ Q·VGJIÑuÝíõìÙ›³Y'©—n›U¥åÌNJJJä·t—Ö¼ez¯>½Ì¬­¹Åáˆ)k !…£j’!¥ˆ.ˆ%”êèÝáf‰+B²ò§É‹ƒÐéÆ{Θòà„<}?më»öðZTløúýÿN™½jKQ0R:Ÿ9ê÷Ü<4ÓMQ2óá±þÐâš÷?»·‡—Õ+Ÿ¿ìSK$ŒxuÒÓƒ’yUËþùüœ*ë|Ý?þq瀴ºáA«xé[÷ýåƒÍ6°ýë)YWýµwL4Ë¢zËŒ÷Þ™4kå– ñ6ïvæ…7Üyà 7¬œwÿ,h÷§©Ÿ\ŸY[ü²|Ö]#þº‚¤Çã3Þ=¯ðÅQ7}Q¸ÿû‡¿µ\üæ ÿ™¶bozr—a×þåÁkz%ì¿[iæ/Ÿü߉Óç¯ÞYjp§vì5øâënÓ'-êF†×??êæ/Ê×}gî‹üsNN¤ó#ßL›®Ž³R(”'Ô¨O;Bu/ZP‰@¢ÓDRJ!MÓ<^oNÞ¾EË–®ß¼ÑÜäVM8( Ûf(4\zFëŒË.ÛóôÓ“â™Î˜Æ!Brι#92ªpΜ³(ˆRJ·“é±=˜—¯˜¼, §\rv¿ F¶€²9S7þ»vÞ7½ì¦g?Y°¥Èù´Y²õÇ÷ºòî ;"hÊÙ}òÜ8ÈûøÙ Û#d0ë½g¦–@Ò¨§˜DwKK'Ω}{õîz @OíÿǾùÜ‹ÿ™4kÉWîW Êå¿öÚ§&Îß\tBäƒùY³Þyðª¿LÙk$¾÷åƒb»f.+âuÉÕlž•Å€ž~é€J Ÿ )Ý>õ¡kûdÅÞ X¥›xíŽ?MÜ]»I¹¨\õï›.¹ûS—9  \¼}É”—ÿxÉíoo¨qœêò:r)ÞðÁ#/Îɉ¨v§P(ŽI„¡ŒRF¢/R«CŽ¡v¨N„œÓ ຶ+w=¾ø˜ÓûônÑ*ƒQ¨® %d|\lß¾}Úµo›–ž–”’䋉1 ]Ó41C×]†ÛÐ Ê©‹wˆ†…ÓºãŽwp¨tÉ”•t=(ÝÓú¼ 2 rÞë?çåýä3sËÐŽ×ÿ{ÆâK¿{ãÆN0×¾þì´\Müàøó`×»Ï|¾ÇŒì˜øÌÄ<Hóì½ýâ[áó7:*á=ëÚaÍõaɽÎÞ»]¢±?÷æöÿ6yxû?òéËýò%i^ùæ[kjÄô¸lx"`ë÷«JkgšB;ç¬ €ÖûÒþÉ 4šhÖë¯oë{ß›S¾žúîÃÓcmš0qsDÙüçøh+@ÚŒzøO¦NøÚ½ÃÓÀÜðöCÿ^ë(!aÑnüè³Ü¶çÝöÈø'»¡G¬ZË¥P(;Çœa¸¨ÒDõ†H)%($¨¤T\MB5›˜›µeùÚ­];tnÙ¶¤¬2wÏ.H$4ÏHhÞ܈Mä„C×’Bn‚Æyc*ʪ"¦”`6çÌ–“[RJH !•õ4‰ÂY("!dãú7Q´pÊj€ô}VÍ8÷Â6o¿¹…_¬®ê7(®áäÌÝ_}¸†;.Í÷ôoæRÏøÃ#c¾¾yJ©ÈúüÛœ1·µÓAÞÿô¨åš^*7¿9þõó³]ÐüŠgÿÔ'îðÏ"°/7ꆵò·­7&hæ/ùaiu@!&6|P{RzêСA|ÝÿpaÇX 
±®ÕjÚ»¹@õ†Uyfߎ†·ë¥#Ò¾ú´rãwk+.<7‰fÎüåŽËuißÄï'á’瞺ª——>üÀó]aå7–òÓZ!ïûwæ8"ÝõÁ×»¬%yíS/—nº~B>PüõG«îì9(®¾³:ð©ÿgçÎúr >n߬IŸQ°+½@æòÉKˆžŸÒ@ÏÉ«ò+£oW?2¼Ï!/Þ^l!Õ4®Ï=OžÿÇ/KéW?}«£pxˆ;Æ},­H<*Y•áœyýgÂ7K³òªZ¯k´¹`L»÷ÿ½ æêï6Ö îï-ûiî.ðsúÁN_B‹„º*BënI Ø9%Îoh)-ãëÝž–˜™”¨È­°ýsYÉíS]ªÍ)Šc!Dã¢ÿ‹Ž…EEHáÈ1Fu€ I¥` DÂÐ Ýpén¯î6B×½±1‰)ŒÂ ‡@‰˜V$br›SA:aqBJB¸”œ&PJ‘N¨‚3 I¢€.:JÌìᅧž>T>ÿÝWç,3«§,.qqZ ’z6Ó´ƒºpfXöþHäHÞOëJkÿS°bYn¤{§ûdêkÕ6«ä¬Ø¸ªU|4í¸áï¬ZåHAñôG>½iÿw¬Ü©÷]ýüŠ0é»f&yµÈ¾•«³­˼Õ9c»üû¥Í®øa[ðŒ¶ëgm€˜cz:ðH ç zqµá×¹t5¤P(Ž]„j»]R×ÿH P!%‘Dg¤ \H)Á•Ä`šÁá1\†®ÇÅÅ5OKIˆ­©©1-3!.Öëqk”9‡>8ë[¹mÛ’  {À9B8ÛÿJ ¥DHH)ÔÆŽµížñÍÞ#}@¬ÿb^Á…—·8¬'ÂâZ$9hßWf¿98¶átB›ß÷î.`\`Ç[ã>ôÑíÝ |ÞÝ~Hwã›Å&`.ùàÛÜWeü\Y…7ø–£@1Ã^šú°$ ðÜO®¹äÕÝuúËOù©5¢jéÌeƒgeÙâîÓ¨ª“‘B‘#»(§‚ÃWk!»,»Üy—Ô:Yml¤P(ŽD#«R{fOí^ 2 ¹”B¸tC#œÃ²©.J½šA¸ˆKˆ‹ñz].7%477·¦¦F®1æv»R]UUSSCñx¼.—KnY–bg×Â… HÆ(e”8#o¤Nodý;<úx¹ð¶¯¾ËdÞ1iéªú,úïe)€M_þ˜g7`¾ØÎgd8ZµkÙîPí½˜E[×oÚž_0£÷Úôþ¸÷w@ûÛß}ë†LØõÎãlip)‰ësÝÈhðò7ë²ór·,ø`Ü““¶¹òæ^1ª‰)Š&õ„jפFß8'ùHF¨Á…”Ü65(5½;v®Z¼ô‡¯¿ÙºyKMu5œ-P}ûöI¯Ï…vnÞ¼5+«²²Òíukš «*+‰>¯—QÊçœK)é!ýUHB@D£êF㎞š¬©?–G5hxÆÁ#GûUhÏWße› ôÄ-.ÿÈÀ8ÈýúÙ›GsÎE7<5=€Öõ¶¿ÝÔÉÌzgÜG9zé7wõ€Äô¼ã¯çÅÀž÷ǽ·)ÔÀ64iиwŸÕ&ZH5»WÌüjÒg“¾üvÞêÝÎ2Vx:_üØÄ ô£€§Ûå4DV<ÙÀ¾.¼ëí-=xöbÇŸÚôâ¨~—½»ËŠº1½.AH={T'Oã+OÒ G_¾±‹@lŸ<þ—ŽºäÚû^_P ƒ}õ”¤P(šÖŠ P½±/g‘Žd”ècÒ Œ‡ÂyÙÙkW®\³lù¦µë¤eK!¡·mFiQqqeeE$ÉÍÍ3{öÌï¿ß“-„$”Z¦1M·Ëåõz͈iEL)…”2ºIC4.®N$ˆÜ??åh"(!G“/YµvÊüª5¨¾ í›ñõΆÖù™c^žôöƒcÎh—dDÇè2z]tÏkSß½µ«—Èà†·ÆMØ ñ#ûc4&›&œyïC=ýÁ¸w6ÚHÍÈùÄgßðÔ­öíÔÌW›+wRf÷A£o{ò­é³&>>ú”ç:ñö¼÷­¯ëŸé¸qmϼþ…‰¿âœÛÓ)\[Õ¥áë>öœ¤èûfç^ÔÉ}LÕ'¾÷]ïM{ó¾KtLvEý·æÝ†]?þ£©/É4TûR(?§(Î~£GÉ+kSF(e„j„R{ò)!c†ÆÂ@’ÏW‘_¸zñÒóJÊBUU>·[r[p[#„Rh„>üðƒgöïWR\¼råÊ@ PVV6{îܪê€ÍEU h¸Ý^oL PmELHPJ˜Æ$£¶-ÂË4%çš n."]€N cˆîD «Â³Ué;ûƒ«.}}7€7~2åîNJ2 ůO£rˆ†B“úã_u»—r"#„5ÕÕ…ûòŠóò¼LOOI© Tƒ1CÓ ¸sZjaaaØ4Û¶k›˜”dÚÖ÷ß}Gµ,KHBlË ‡‚Œ2N©mÙÎÉyŽëC(u„FÒ9q’H R/xOÑ X÷ñ§»ÆŽj§H¡Pœ"¤S õwÑŽÎ 9Ço ÉÓ—f(ª©‘¶íóú¸ij „nÛ–ñxÜáš`AAAM ѪeªÛÍ ).)!”Ú¶mZœ1MÓõ`0d†R.¥`e#šFjš4¶4ê† ‚DEα⚦ØiQ½kíæ’PÑŠ‰¯L-ß°;.j¥)Ã(Š“@„=R(:÷Rÿª”‚si[&ç–°©Î¨ÛФm𰏂‚9+{œÀêê@(²m›sÆÂ‘°iF8çÎTF)c¬6AJ"…äDR!lgE,¢v,‘@¹B 
afOþë“ëÖÌz>pßY‰j'…BqRˆP½0´ý±Ú’Ô;MUJʘÝþàÝ#ӕרP(N*r†¾jÏÕ®ý%ÎþÿšÆ$cTÓ5J‰B§D€=›Û¶m!DIiiEEEÄ4+Á`0bY’Ñ葬Œ2ÇבRr!œ5@ „0J%£#ÎÆuâ`—H*j£ó“–Ý©ì P(NãÆ‰PÝëa0 H)$c–eY–Éܦ„h”:Ÿ€¶eI!**ÊËÊÊ‚á€4-+lF8ç”RÆ%”0Z;àçlH'…³Zˆ1¢iLÓ™¦kµ»Öpâ·ò„ …âw.B+€´…àRJ¤5Á@ à¶Í—RRJ(¢’a™–2•WVUIÀâ¶iYRÓ4]Ó¸o Ý1UJ)—B’P0hÓ5F¼;%B …Bñ»¡ƒú{ÔÎÉDÝBeŒ2ÖTN\ÜÙx”Pê„_K!#³¨¨¨´´ÔÙβ,š¦1ÆàœPçA !¸Ímn;‘ µ±âJi …âÊ’ TFcäê{B „0ç/`š…Âá®ë”ˆe )œ¹J©3Ó#„(*..((G"áHÄ´,)áÄ$!„àQBr.8ç–e9;™Z¶iÛ¦m›N Uœ …BqrqLÑqõf„$„s¼”BB¡H8l‚0KX‘`È­{ˆŒF8±t²²ª²´¢·N…©©,)ÈÙ¶"g_û^=ÛÄkj J¡P(~SÒAœ¡/)Å?Î;)!$á’s! VV†*«©@œ7ÆÅ nFlnSHÝ0˜F)#.ð„ Gª*«ÂÁ`JJê©]»îܶ-1 ÝRj¡š$w:3˜ËårkJá¶B›sDO—RFÏth|tœ çg­ØL?u@‡T÷~õbšáö%¦µh¾kÍê5=ý{¤»” ÕšLHB•5 ů,BVe5¶Í¹iÛ+bF,Ó [ç¶ÅMË›"‘N¹¨,-óhZ›–-½.OuUeyY‰s2](Â&„è3(Õ%4!u g·SÌœm‚ŒÛf8Ê$7c<uéDc„0‰mÛÜ’\€ Pi€uÜÁ½¶T¥ö8£c².jò·nÝ•WâÄ•˜Ù¹£7{åΘÞºw,]º3'Ö1–á‚ K—eí) Xu'ežÚo`ÏV^zhÂûÖ/[±qOq êIiwÚ€ÝÓÝîœ5iN~ÆyW kí&€ çÌ›ôýîÔ!—Ÿ×)†I(×-X´vO¹IÜ)íý:WΙ±£Õ¨«§k‡¿w kÚgK¬Þ—]Ö«vWÜöÍÇó§]2Ôž9}W›Cb¶.]·§<"©/½K¿¡ýÛÇR"³féªÍ9%AêNlÙéôþ½;$h°‹6mwë³ûØ+m¯n>âúóÕQ …âW¡¯>Ÿ$ÀKò%ð IDAT…B [pÛ\XœKɹF™Át‰XÁˆ›1] Ôë2ܺ¢¢ªR@†) MóyÝ”;)-,¬(-=¥k—N™†f ÒÓ<9)55E ™_\²lN‰Í97…iڶŇf]ʦØoÆ*Û¹;”Ú­g².krÖ®ØNëÜóÌ4Ÿ¬)ؾií¾‰oíÕÜžÌø»KBíc}Twþøí²üÔÞgiŸhðêÜ5s}?Ûså…]$S§ExïêÍ-O=÷â´Ø5í¥P(~uª,($„PâœÝ£Qè’€0asaÚ^Ãn¢ )-Á%ˆ¤Œ1±-B0B…à‚[<¶#á¢äIJ¼¸},F›þþÖq€ÄvÝZ,É)Ê«æm‘óÓ–€·ë¨]Óu1ú-ÍùrÃÚì¾(%frßþÝZ(H¡Pü&"䥚sX„‘B:û^s!‰¦q0! 
aDHB %&ç5f$dYĶ ·8,K“Ü…¨miB$Åø†9À0ô„„xM× ¡¡pžœÌ [6ó<Û„p*%¶°maÛ’ &ÉñŸăeÕˆÏðRi–î+Gb÷fÞÚ$¥®¤„’0G'¨Nƒ¹«—ü˜WV ÛRÀk‹Ö\ÙU¹%œ¤´M«;ßÚHnL¶æDóDêÎ0´Ó”ï~˜¾Ã²:œwf[ï‘óÁEÕ`ÍšÇF]?âiÞ!9U?s?îf=:ÇìØ´a_ÿmÝ2°{s‰–qnkA‰O«-|ª»4دÌ+4%#©ÎÍÑâ[&³ …ùÕ¼ƒ‰IJ Åo%Bzí1¬R:‡l;€”T‚1F%LÓ$ Ó£Ò¶­ˆer‚ˆ ‡9B“‚r‹ á&àˬ FÕáꪤŸÛí2tMH۴욪 á2tMÓ¸)8çœKn ÛBP1Í>ÞJ¥²õƒB«ÃpµðÕÙCšåÅAÛÁCi«mWK`¯œþͺH«^gÝ6Ùçb²*kÆ7ëI׎Xy³>|ûÀ뮪ˆ(ˆ»Å©b·­­özjKÏÏeBÚšKÛ¯ršÛ£¡ ?s?ZR—nIëWlȶéhíÚZæn7 …p6"§‡ú^RÚaš«~@:Ñ ;b ¨n¨c Åo&B<† M£‘Ú µË†%Ü6#„ ŒqnÛ¦er+1%Ù†¨©¨Ü&šÆ$£BH!k’ü¼Š‚‚’¢"M#ˆ¥Ò+ 5 kWïÙ½;ºns[у„ˆ$M²85º—H.êÄ©|Ï®Jénça€ åÖxÓ]°Ë¶m¯"-† õ·s´ÃÚö¡iÍ£ƒ¦œ5j`zý¹B]1ðŠ‹Ö×$´JªÞ¸`]çKüÉG,Âtnòý¹å‘°ó«G¾×±G‹•ó6î®Lo)é4$íÈn Ñ=:ìˆ%ëËtĆîÑ”ö(Š_‚Æõ-†¦ëLÓÓÓ(cÎn’Û¶1%¤¦kTc‚H‹Û6çÌÐ’’“ûöïס[Ww|œ2 ›¶%¤”@0*/+\PJ݆Kg,åíÛ·zÕÊ/¿š¾xÉâüü|Û¶A ëºÇãq{Üš¦IÀæ– ˃Op¢G¥:QW¬aU9ákIz¸`oqˆ +P¸cýöH¼›Îípé® ;­æ[xISâñÖ.2 ·ìáÔâZ¤0Q]Eck‰÷RêŠñjxù†¹+JRÎ8ïÜóú§Wþ4gM±}ä»ô¥ø`—ÕD5R†ów×w’¼âmÝ£Q’µú§-Õ ]NIþ™g×"‰ŠÒœ²º C»|o© I-âÕ¡C …â·!.$—’ Á…°…tN™€de: ›aÝ¥K*9ЈÔæfj‹fg :«UËV\hÑ4PBtBfÄ2-g †‚a³¸¬âÇ‹V®ß¹mç¾}ùE¥¥e5Ë2mÛ\PB5ªQPPB¤2:E.%DÓ´£Õ P_ZŠV™S’ZBÇíã*6.þqî¢uy$ó´ní['Ëì• –l®LìÚ«S’Ókñ­’©µwÝÖ‚ê`uñγ–‡3êüâêHý½ëÜ-{uO¬›½pã¾²ê@UinÖü/?Ÿô]V…€]¶~îʲä¾CºÆk±§ :£YÕš9« kNcZwNEåºEërËU%{Vÿ¸¢<*'?{?®=:z«vdSOí÷s¥M<þ®1¡Íóo)¨¬©©,زøÇMÁ˜.þLZ¤P(~ûá¸zS0ÎÙÚä µ¡RJ) $ € i žœ’”˜˜`hZDpgG8[J©ÑøÄ„äfÍ*U%I‰‰T#»³÷,]±rëŽ]µ9!TF4ª1ç,ˆCàÛ76¢Ã¤±­;$.Û’•ß«MbÛžg¶­÷·Œžge@Ø6´ýƒQÄ×~ðÂ9K–Lÿt õ6ëÜwèV•Kògnþ~jäÜáîý_ÖÓz_t¡k銵ßOYÌ-¦Y»~öëž Jš»²,¹ïØn ï2¸ïöÉËæ¬l=v@ó†Ëh\·sÎ ü¸tõ·“WOj§ÞƒûißÍ,¢äˆ÷sþUg·2 %ulãݸ5¹G[ßQoÊžl˲rrröäYÐ\†—I¦ƒJÆdt¨wŒjÃOL¬§:¬'vè= yßî={ÖíÞh9" P#&!%-£gçfñ¿‚ Û´yÏ„ÐÈ®ï?_PÚ¢÷Þí’\Vù®U+ŠYËÁ™G íf ºªtçÊy›ÌVCz¦)R('½e¶n½eëVBjO”“D ºm‘BRZ@!¡šÐ–¬6·uY”š¦!’‚2áöÞ’’ÊP¨yz³ì¢âªê@("º[s³ 'º'Ž@Ø‚C@ H)á8nG>Ãû”®™3u%fœ’˜q ·,[¦ëÚ¯¹7šU¸ð“¯·› üÕ{êè+Î9?¼xyÖœi«8 Å¤µëaÿ#móˆÀ–ï?_V¦Åµé{þàŽ1*ºM¡Pœü"4|èÐ-[·;Þ£Ä9äÔ9ãN‚‚rÁ¡DR"‘T#Z8\#m›p)%ç„M£–-À¤×ã6\n›Réñ†"f$\w» WØ´‚¡p¢ËGàœê-%óB)„6$C—]5øíA™îú ÁôæCo¼mè‘?ÓsøèžÉI\±·õP5\¡PœÐ4îùü‘#ÚµiH"%• L€rI¹$\!™$T‚P!%LB×taÚ‚[nF 3P Ó²ƒӢĘPmÙ¶®{“\q‰&´gRsÕ„íˆÉkÄF!„PJ(¥Œþæ{œÖö†[ÎSE«P(¿7ÒuýÙgÆwhÛŽHâ¼ ±ÿ „JÂ@‰lAÑ™&…p1Íà n¸tÃæ¶eÙ:#.¸\Ôë J)uÃïŠM 
nŸæ‹õŧÃ-(“pâéUŸ¨÷uøá¸Ózv˜4íIÃÐTÑ* ʼnO£;ë”ää×^{eÆŒïæÏ[˜“…ëf‡IA)#” !)ƒF‘BXfÐt¹]†Ém'–›sáJuaEL!C¦­h„1ʈ¤DHB ¥’P çðcc}=ºv¾æòøÃ…J …âd¡q!Ú …B¡P4!*hJ¡P(J„ …B¡DH¡P( %B …B¡P"¤P( …!…B¡P(R( …B‰B¡P(”) …B¡DH¡P(J„ …B¡P"¤P( %B …B¡P(R( …!…B¡P(”) …B‰B¡P(”) …B¡DH¡P(J„ …B¡P"¤P( %B …B¡P(R( …!…B¡P(”)þ7U«^ÿóu— íÝ»wïÞ#^Ú9á<ˆð–wï¿a̰޽{÷î=lüšÐÿŒa %BÇ×&ƒߺyHßa·¾»)(•9NH\ï»_›ðÉ‹g{OÔÂ}Ê-¯|øñ«£âO†U­ãX1wü÷ÎQCúö®cÐ_¾+â|D”Î}âŠsϬûD¿ác™‘Ï•ýïU–]3¾\Uk¦~¿ÇTæP(TëhŒ7½9}ÞòYϽ\øüó3!š<ìéÏg.^úÅÝmsÞ› –Í™òÂÍ™¡_QøÕ­çÞ³0ð%ëêpÉ5ýÓãZ ¼vT;×IP~‡Í×/dÓ߷YŽ>wÇn‡“­uœpn'cè|õˆ´ÃÊóÊÓþ‡jø‰&Bæîi®©¿]²îN×ýû›¹Óÿyu÷Éðhz¸|ýB6<éã3³z"åîxìð µi‡#ÿ3õ•Äõºó©K C'a ÿ]‰Pͺ‰“sNšdOLsý^3[¯Ò´uà˜úÑW˜T«‚²&냧þ6aîö*Ëÿ2¤7tzèë —7g€îœõþ[·lSap¥v>cÄ5wÞ:²ƒ·¶ÑH3o᯿÷ÕÂ…!#6¥yæ)ƒn{üž®MGHö ì•-~ù¡¿¿.·Jø™†?3‰»•xá·ÿñ¹™9aïŸ>áRmý·Ÿ}>õÛÅ%g½1õ±ÓÜ8b®o®»ž¶ùýOºø§çzÿøòg,Ž(<Ú=W<»"™w|òŸÙßL™>sÁŠeÜHívöõ+!¸âáwÎ Æõ9¯óžVV%¥yÊŠpñ{_?~šçgr}XsÅÇ“ªâÃe6Òèâ¨o‘ù\rÿÜ Œ˜¸ØÄô–éÉ>^¸qõ®*Z‹v %¹vjëÌ)^+oýÚì €NúâÃë[ëÑGݽ®ýÚ®:r­Á­ßTÕ!^4ûÙ[ù:/¦×õÞqa¯–FMaÎæ%S^wÕio~ÿR_/€àŠGFÜ9;HRZ·IŒOk‘žì%Y Vä˜@Ò¨ÿL}¼Ž¬áªu” þ| <<¡µO_xËôJxÛÚç”ÖÍâh gͬé‹slèÝÿøþ7wñeúMRÃÛ‡iàÅ3ÿrñ£KLĵÈl–Ò¢ez’ÇÎ_3oMÒ¯þpÊ}Ý¢Õ-¼ý£;oø×zt»þÙq×ôk¥WlžñÏG^™[†ä±ïO{¤‡çH ô³ö mxiÌM“Š7âéÏœq@5®~æâÛÖ^1é³[ÚéGW”Gh•ÇÙÝV͹uØ#Ú«?þß ˆª¥Ï޽gzà=ë™)u2dïpõè÷Oy{ÆÓ½¼uÝɶ ¾ù_«íNcÿúÀåg´òsWM{ý¥OÖ‡’ÏyæÃgF¦k —ìIá ÉFÂK¾¾Áï÷û¯üdŸ]w1¼ãÃëûøýþ¾7O©½ÞøÊH¿ßïþÔO5õ¿^8í¦3†=¾"pØÄ#Ûßå÷ûûܹ zÿÅPÖ+øýþ‘/®­KÈÜùöh¿ßßÿ3Џ”ÒÊ™x…ßï¿zR¯ý„ÿÅí7}˜m5œlCù+q£ßï÷_õyž““ŸKü¸¬T³üá³ü~ÿ›ÞZ¸«Ò’ÒÎýô¦‹ŸY<š\7¯Ãgö˜Š#Jpõ“Cý~ÿ°ñ«ƒµ—DMÖcü~¿ÿ¬WíO­lþcCý~¿ÿò‰{ëŒcå|t™ßï÷ßô]™8$Ísþ¶.µé¾)·öóûý£ßج÷ÛVö‡WœóàòšÌuÞ‹YáºO˜Ù_×Ûï÷~leÍ2qx³]‚GS‡·Üš§†ùýþ¡O®®—§Hö¤Ûûûý~ÿØ·wF~õ~÷Þ~¿èãG,º£²¿¹ë½±~¿ßßÿîJë—ˆ¨\ôÐà¾7LŽÞïQeíòx©œ}‹¿÷ó£¦ç‹ÇŸã÷ûýþ³îQh×k/C¯×L#[ÿoŒßï?óžïêÕ5Q½êù~¿¿ÿ_ØîåN,šfNÈÕ~ìŸ/LøºÏgï³K.¾$@ÅÜ)ëõ†ªNÝ?|쩾£N;¸ñóo €Ô!œR·”AoѯO`®ž‘€Uº³€û½:Ö좿¿2¶¥vüykºÄc¥(q§œÙ§mœ°=öÐ-£Éuã~» Š£Þ„ñv:ÿ‚Ö‚%VݽÐÄÞör(Ï)·w‘­Ÿ¼·Ú9í¦±ë?njéýÎî‘î:ÂHÞbàð6yEá&™U8$Á&. 
#óâ{¯H°ûËi;#'@ ozÃo‡³§(ÏVY½iæz¤öê•VwË$¶sßLUYk ¬ãê½õ…×NséÇs öO²ˆòeŸ.²û^3¬ktQÚ*›zn(¾ßŸŸ¼ @pá ÏÿÐÀÜP0ë“IÙ€§ÿ•S÷w×$¦Ç—µÌ•¾ÍµNò9²&JÇÛqHWÀž¥Û¢%i´yi'…SVWE —,ørkʈÑÞ­µ VgUHé¢×ë›âšÇ°‹r*8 'f$ØöÆø÷–šÎo=6ÑÛÎhS&~¨•i¾¾öNOaG“ëFv}MSû+Ž+ƶÍëå„>€°Eãº-3wáâ"gtK<°Jnyù¾#ƒÝ£z'Ç3D}`‚¿@YdèáP´fs…øíkø/aXâŽqàV´"ˆšâ À“à¦õ?ï€PyHW€¦¹nˆÀÆO¿É6ëF›|ö“kȵýZu¬Em•¿@÷›Ðÿ/OœŸp$2÷-[] ÅiÎHéÍzœâ°{ÑÆê“;´°É¤Œ”Ì,)%¹• ­åÙ—÷xíÙõ¡%SVV žHaçÏ›¶³ùEOµoÄ«<§6?wAßç­‰ØÐ[]pËàÇͬ~û® >j=à¢Ñ—ŽuVû¸&QX­)?ÔJÇžëÆæ£IŠãÂ.Þ^  Ú‰ÖD~‰²Hh•Ô 2¿ÒFìo]Õ‡]wœÂUaQî´"¾ÔzÜö'q}®½ eîä’œ©“·\÷p`ï›ýùÆÄ îï$úŠòøeèÌ¿<1bñ}ßW¾ðܽ_9÷à»*ÙQ1i±© ß2¨AÉžRINZš¬ÉÍ­@$hÖÖ2š:èʾÆúeæòÉKˆžŸ"òæ~µ'sô ­e/3h@çû>zqp<9øY-.MÀÒÎ{n¢ë½WßødÁžpö’Éÿ\2ùŸ-Î~ä•'.íè=î Ó¦Lü0V:ö\7¶º7Iqü23h€æÖÉ ×Fš¾,ÓØa[ž5üW€Ätî—‰õ9Ek³Jy×ѨÓÀö•{¤ôí“®7ý=].»¬ÍäÿÛSòí§kþØ}@œµgÆ”]—>×Éõ 6«ã—¡Ä÷=qî’ûgV½ðÜ=ÿ|PÓ°BNÓ0Žó'šK+hÜÑíM&B2Z¾îÝoÞ~Wô.›üiÊâ’çgÛá²³9Œmx âJnѲYƒÏKÄÕj诽µlÛÒ™Ó'üÙÒü¼Ù/Ü×î‹¿öòw#mºÄg¥ãÈuck{Ç/õ ë‚ˆ"±'Øó}Ó—…ŒT‡À“è¥'D ÿŒØfôC>~tÞæÿ¼üE÷ñ—v‰#¡œÙÿzua±Cï½±‹«I쯷¹ðÚÓÞzv]ͼ‰‹Kû -ürza×[ÎÏÔÑfÕµ?é¬?gÙƒ³ª‚‹^x®ËÅ ÊQÃgØ&—@ý’–VØÃgœ Ï!GÈS%)Í©`´l—¸¿W#q½®ÈõSæåìžùMa—±CÒ7´ GÙîÒ£˜~Ó“: ºòMþìO§2 döW[ÃMg«&Hü°V:î\µ˜qüBè)S hkÑ 7Éú ”E¤xO9£U‡dýDªá¿hO“zÎ3ïþ¹‡^µà¥ë†Ñ·o߳ƌ[{öíÿøäo#ޏH¦1ö¡iCoä¬ÏÚ±vÒÌ@ïk‡4c¿p³jüÀ¸á±‚ËÞù4û7!¥} TV4cdWî«@ÒÚ%é8™i* ïY²Ýhסˆ´òv»ld€ÍŸ½ÿþô²î—LÙÿ‹¼*gÓ–¼à‘Ç¥ô´î¼Š–,Ú×PÅ1÷L{gv½ânÑU§U„{Ê®)oÈJÇ”ëcáHÅñK ÄÔ =aÐ@oÞûôDÅófî<Ñ:Õ¦/‹ðîÅ[-€v~Šï¨á¿VÁ‚?ÝÑå¡iK–ÌŸûÃs.[>óãn=«¹Ñ„ö'ñ½¯™ `˻Ͼ4C®=£þúëã.ʣ겎­#NòÐcÃbã¶èçO·.'t=×n©Ðap×8å ¢léÄJ¸snœz`’\Ô @η³+ýWô«‹}es½xÌõ׎ûªêÚÞ)#* uåìë~Ùð9¼8eçácEEÕÆ¯ßý6§^µâ¢jÍ»§ ${ôyû™Ä›ÄJ‡p¹n _GÎlCÅñËÁbÒS4;ç®.kØúž®×\Ù@ÁÄñÿY^ö ì_uà¨Ê¢Õ hÞ¿+âGüaH*ýµkøqµ…〬^ñ÷[û¾(ñ”„òÜŠê`°º¬0///¿¨"È›ÔþÞ®WŒÍP¹)'nä•§Å4]Q¶ËjJöÐcC|‡yp¼ê²Ö@xég ‹D=ƒ®ûì‹À5àúsõߨd›€cžìݺsŸ–â3DUöòɯümncŸ½¿ÿÁÓ}0Ú\0¦Ã{ÿÚØA—÷Ú/Ù‘½‹ª€¢å+ ­Þ±è©Ý»ÅaOÕÖ×þúBå9™¤Únɵƒý÷<1jå}Ó ~zåÊË—ŒÙ§]²aÊŠöeÄ_óÔŸœ¥Ö¡ÿyàiýÏW ëÚÜÞ÷ÓÔWÿ³ qÃÞ`²ÍŽ:ïGL¼i¬tH_s¹>•ÿåÐ`IDAT|¾ŽœÙŠã$¦ÇÅý=K†–Œ»ê¦ÙÚÇŠ`Eáž»³«Øá°íŒtí®{á¡u7¿´d×Ä»Îÿ±Ï¹ƒ{¶Kõi°ƒåûögÜ8îúNÇÃwx[Å•~MYÑóÉ]½lµ–ãÑìŠÝ˦¼ñæÂ ípÝ‹ôsìté7Q ?;×3€æÑ%€}“½iÒÁŒítþ]O><¦sÛ.5Òþzë‹®íñÎsëEÛ±c:ºš°(Ûe5NŠm‹CëàÙZJþð£ƒV<¶ x`Úáúþ²öÿXúÔ½/Ù]= 
Ó]“³rê¿^˜Z‚ÔOŽ;7ú@{¼½Üo?~|ãìÚöÕÄyEožÿé[o½÷áÄϧ/)Hìw韞æÆžñ‡©F4¶]úéBkä#>»y]·­'4—ÿßÞyDulqüÜmô¥÷ˆ ¨€OîÂCE¢b£¨$FŸš˜DcC‘X5+Iôib XTD@Ä€HèÕ‚6@ad]ʲl},‚pÙ¢¾0¿Êì=sþçÌ™;wîÜÛ©…,íñ+Ö9h¿X·¥;XµÝ.,­eÞ»u÷™ØÀi¼§£±ªº¥çtOS1§žõèÖõ‚ܜ¢û,±¶WP°‡…æóƒÏŭ͵ũçNFF:_Ð`â½`ã®5Þ/$èõg{¾)iLß~(ö±Ø9qñWó«µ]ÆÙéHûüq½$nHÛ³*ôÇ<Ž€W–’˜š]&pðtÒ{¹‚©õÓë×õKÓ¤ÏÎö.Çë&쌽«Ce6:¤^I¿É:þ_¢+;B6Œ{Ä—4ä\IɤæìnþèHèÚðÈ|¶€ÿ íjËÄÝÝJLÍÊÕ«ºsɪ­ªæ€îP—éKC‚u²âйå—#c+¬§M²VÇȺ£|g¹‹›Ùõîܺy£° ¿¸¼¾“>|œ@·÷TØ]Ýu-%ŸIwñ°%—þ²zÏ…Š @CÞÕœjº«§]ï—{újÔaÒÎÍÄ~L@‹^§xºö£­ô4)õ‰Ñ§"Ïœ½p1ñÏû‚¡?\¿ç«y#þsûÕZ1.4­Å _oè‘t;•Ë›Bæ°»¶&.ø~ͺˆDf'Ôf¦ä³ŒÝ=†j3<-›‹Òï5ýýBÀ.ϾÆrô÷òš·’±ùŸ¤aªý &MûÓ­ Fýí§~~JÒGVö>d\Ô¯8±ò?«·ý”Ìx–zò×ÓqÙÇ)îÆ=Ó°cúàbZÕŒùÞ¯²£ê;ùúÑá=L?wôȯ‘gÎ%6è»ù/Û±ó3·WÆbà]CÖc{ıÄ›HZ2×yûí/ãK 2xé1å@¼}DÙû‚}EUt‹;1¿þÆÑÅn8Ž{„æ·#/ Jd~& ê úf—¤©(æ–ÁlÛAø,â^zc f9omWóìºh¶÷ê î¯F“T ç|äˆð[¸Bä§A‰ÌEˆðáÛ¢§W"K,çùYR¡{¥ïÜ[dƒZÄÛªAO’cJ%;Oõ¿Gdë³)€‘½¹r*B B\{~Åú”ç[¡Ì‹á?³¼>ó5!#g¿ˆ·>ÌÐÔi¢Æ'-=·ÂI8¹GOVfhCCŽ” |ï„”À$_Ò”wxÿѦQ+¯œˆ­ñÚ .¿ñÒÉx»M'/šxlKÚµßÂò÷á¦tª¨ýä~AJtäÅ®•ÿîˆѽù`e@µTD­Yýs!ëŵj†6c—ìÝ9Ç¢GôH’CçoÎäÐLAk¶®ð1Ls‚^zs%hpËxG±oÅEžNȸù öù»ÿ$u£¡#^ÓçNm€**B@¼qHÈ@E@ ¨!Š@ PB !@ "„@ *B@Eè]GÜðç¾/æûOvc0 Æ‚Øz ½;ÒÖë‡V/˜íÍ`0Œ){Ë:Qß%™ß­úOàûî ƒÁX˜Ð€¢ã'h4@EˆP¬óÊ~^2ÁuâÒ£wy 8Ìlè½þð™ §×Û#±{£3V8õÛõ\×Ïd0 ¯Í×y½‡¿Áøµ£ÎE®Žâ@¡q¢à,Vh4@EˆÐÀñèòÅ’6Ië­Ø?ªH „Pô­ôô­õÑÙd(‹¨õ‚„¿ÔweV[—R±™=¬ ÝlÜG3­Ñw×rE·š±¹&ÉÐJ!Ť'AP#ˆÏßî|éq\Ô­fÓ…ÝþQuø‚ÿ&.@Ò ä‡ªo¥@³4ìs Dßµ@zDIY,ñ$U´Ÿ Ý )ˆöÛ§Ï3‘ïö‘)º–X÷ó¥2}ão`éùâDÔT·÷ÓA»®·¡ƒÿÑ„WyõøÏg®äßeñT íþ=eþò¥SmÔ1ISZh@X&`Øšsÿu®¸t!!%«¨’#¦8N_¾uÝ k5LÚ^¹}ç©´òV(™À–pÀüôÆ}Ü~Ú*véÔ\S²˜•´uYx “¶ëNRJ’¢ÏÆ&å4zŽÝiÕ±ŒûOëž”&í™ç‚ã® O”ó_ü‰¨>yåXÇqïþÁ‹¿X·që–õŸø¹à8Žã~¥R©TÔÆbVd„OÅqœ±8ænuuu5³Ž+’J¥R1ûò"ÇñÎÖŠ^^´½ ÌÇqïÏ7}>ÇÝ'O„ã“vóˆÙ#•Šëbæã8Žt%~ù“7¶OÂq÷Ýu»£{ë/}â6~}V‹äµ.`_Û8ǽWÏ.¯­{Z^|íÄÚÉ.s"«…R©T*2OÎÁq_|…Óå'x7¿öÆq|òÎW—“·S½ÖpùSWÇ=C»[ßYy&lÙöd–¨ë¿ÝýÎÏmÁù—>n/øÒÇqÆûAÁ‹—¯Ý¸uë—ËüÇâ8Žã“·q%r;p[ ïAÔRÿ÷»ânT>cÕVÞHˆX<Çqß IÏ„/t²¸éæ…³9u\»âã±8Žû¬ø)¹¸²¦¶¦úуâ?ÂýpÇÄ׋_ï\bWì'6dUV*•¶¦-uóúâHòŠZN+»2u·?Žã®Ÿ¯yé"yGPˆ×§'±8é5‹ š'•v<ŒZ<ÇÇ.0˜âo§ÖGÎéÑ€‡‡·+` ž7ĨZºêä·ß)L‡áïªÒ­ uÜOc“*®Õ<ÏtaMúÕzÛYF}ëQÕ¨"‘X*§Ó¶Ôäßl³ÑCT»;ÝØÉ^g—qez jòosÀÒýù³%%ÐwlÈ®¯{Ò_:õ’wòˆH0Nˆ·íaž¤½¡Yj:ª¤®¥­ 
ÐÑÔ!QüèôæUCȾ1AØÄl¸îçþ÷»ÚNÑžÈgÅÜg®ÓoK:rcŠšÇMÒ%èYz\¥éŒíÃúÚÒKyÏﯨ-m7ùÂ拉û ÿÀ ™žÃ6®ÉÕ)ŒŽŒUËNçÄ—p=ÆÑ1ÞݸLlÚ†E¥;#«%\eÎÿÄš*z–™Ì² v7’ÁdF°­¨±¢@ÓH«GI§h›k´Cc[z2ÝÇêËë€d`©£¬WúŽ ¹”° c#Ï\Î-{T×Ò)–/ÑäQÙÓbUº*¿•ߥÜH;š;4 5IŠO%ª†PxO`·öä/m¬ç|ˆnô¦ß ”Ï’áø`WZI¾ à|Ç{š¤6-¾Ê·eßcÙèýðÓ*Ǿ;ü[f¿:÷ü¹ç0óÙñU ­BöÊÈÕ)LkLÀXõô4^~| ×c¹ôb.Ùk¿ÏL›³‘*˜‰)Ì…Ÿ[Ög'×ÙyÉT6et±¶a‡€B#÷ì8E… ä eLz©°­] 5åíÈî;6dVV\—øå¼mYíf~[¾x”‘ ÚsC&¯Ê’ñ8yDT.˜¦›”0ë‹KÙâ‘fÏ•’¶•=WªGIª!”R„hê4€NÀTôÍÌßWÆä´‡¤ëNIdš`BxžLÕ>>8ôàùèUd€ÆÔøüw¡SšŽ³=éù³².ªûL·Q²‰W€#Ô^¹T~ù‰ÍLwC™ÓI§õÓ–b0Ì€ËâöXiµÔ´fd-óù"_©c’áäoŽ®v¢¶fî]0ñß®®®ž[²´|>ûþ·SŒÈÊ©BIEÈÈÑFês³kÞA`ú¨9Sà^ôñã ǹã úó‰ *î×Ô.»1Õa3>p€Žf¾02• ëŒ[þNi8ø{itdìÛ›C÷fM’‘gàëž#ÖòÔ ™œF°-ÍÌ ×€ÚÛÌî»}…uÅ÷ÛÀÆk$“ÑÉ*Œa$xœQÒ4ðÍ „®ØwlȬ¬D, ° ቨT„u™Q¿WŒ‹ËÍÍHKNNËÊ/H9³{©§)MI‰£ ÕÄ­Ì»÷kyh˶r‹h8Ι¤ÌÈ=1•|ùRâÅžI3«MfÝ`ªíìï3)µŸç¦Û¯K$­e—Ž&1»Ä£¸­ž ¦Ž&4 kšP 2í&Gòv:¥>b–·.´µ›L™jõ_2’ÅÉ$]·9cUÄ7~<~½u Òºb?±!£²$ Ss `æ–6¿¼´¨µ‘+%Ѝˆô”)·pß'›ÿ¨×µ×izÊjæòx\«¶¶öY}3O¬¤ÄQ„jNÚ¦Y ?š´û:mU]Æ4ñ•_Í,Z›Pw#"xn®ÿTk}š¨S_S]§=û*§,P GÑ¡ªõÁ»[&[`\ѰÙy lÑUöЬülެ­ñsé„8tT ÝA]ýÁÄ‘¦ªüš±ßyô‰ŸÏ²¢h:Í«–—Õ‘»åƒÅ©îô$¼fVUÅãêVñù"i?gf* Sjö3'éÇŰ-g¼oA{5»¹Ò s…޳ÜåžôÊà4bmi6 w‡ü}Þö5{Eaº[¨¶3‹bîŽmÃ)_oñ}Y=eq2IoBXè¸ùßf×[>§bæ¬q¶zäŽÆê²ÂŒ‡¢v¾¸yÁ+ö2*«f?ÓK;%©%ëÒÐ[¬¨ÍUÅyw¸d D<‘d{NÖ§½§§–²G$Œ¢F•@͹M‹ÏõüO­áÓ¾øúË; ’¢GnÕ:ŸäÜàÔ²„ -*.„§wÛ¶m`¨YzN÷4sêYn]/,ÈÍ)ºÏkÛy{Xh’¡õúÁU«æ°…¼²k)ùLº‹Ç0qÁ÷kÖE$2; 63%Ÿeìî1Tƒfì`Õv»°´–yïÖÝgb‡‘â?voÝu(ö±Ø9qñWó«5­:c6…ý˜ÇðÊRS³ËžNzdböHþŒÛ²ïHÂãN)@cNbRzÁ]·¡]☤eLÊû=K8uÃjÓþ8`dЏµ¹¶8õÜÉȨS§£ã L¼lܵÆÛ˜òÂ$+W¬êÎ=&«¶ªšºC]¦/ ÖÉŠ+æv–_ŽŒ­°t!ÿr½Ì"0âPttë:7,qÖ!½ZeÐ׸>•¾$,ؾËþ qCÚžU¡/y!™-¹ôÇÕ{.TtHò®æTÓ]=íè2;­{1è«-FÕwòõ£Ã{˜~îè‘_#ÏœK(lÐwó_¶cçgn¯¿õfœ†Ž"ÿu™g×…D³½Wuo–¤bè<ç#G €ßÂE»ÔþI " zz%²ÄržŸ%zéìÍ8 9EþkkГä˜R ÅÎÓFýïæ¶>k‘Ù›«!íPú?G\{~Åú”ç{œÌ‹á?³¼>ó5A˸Jsr8Š|‚M j|ÒÒs+œ„“{ôd%`öÁ6è©ÿ? 
Ê í·¤)ïðþ£Mÿ¢V^9[ãµ?ÜE }œJ™NCG‘O²éäEmI;°ö[Xà>Ü”Nµ±ŸÜ/H‰Ž¼Xµòßñ!ºƒþgI¥ƒpO»¤!9tþæLÍ„´fë s4·R¦ÓÃQäGľy:!ãæƒÚççfÔ†ŽdxM œ;u´ª@¨!¡ ÐÆ@ "„@ T„@E@ ¨!Š@ PB !@ "„@ *BøÿãŽÕK†ÎÄc%IEND®B`‚pytest-2.5.1/doc/en/img/cramer2.png0000664000175000017500000006131312254002202016414 0ustar hpkhpk00000000000000‰PNG  IHDR4žªñõsRGB®ÎébKGDÿÿÿ ½§“ pHYs  šœtIMEÝ :‘ÖhM IDATxÚìw|”EþÇ¿3OÛ¾ÙôN IèÈP¤‰ E@Äî‰gûéÙëy§‡žÝÓ³\ñl§§bCE)**½ $Þ“Íîfû>eæ÷Ç&!( âê¼_¯;Ã>ÏÎ3óïÌggæ;ó J)0 ƒq*™  ƒÁĉÁ`0 &N ƒÁ`âÄ`0 'ƒÁ`üÒà{s“Bè+%®÷Ê<ÅžH@#ˆ t“NÒ"IµÅ)ë‡4—åš0O)¢¤ó»”RŠ€ã‚D("” !! „P¢#JB]Š RÓç' ¹ðâ˜ü©å*©÷* ÷ž74cVn?P@bµÈ`0¿:qª ªçU±×Š* B(*NS]ÒTÉÓ./‘B>‹$Mш¥¨ÂÂ/EEQ5]× !T—DIЉÀs³‰GѯP ]pà‹©¡¢Ì¿vm=&[qEÕð¬i ñíO@8V“ ƒñ마NÏù²b¿+Ü9ž¡L)*èš5ì“J ÒZk2HȆi:PŠ¢Ò„0¡Vo à…U×tFÊqs”Qª*ò¼ÝbÂhT™(Ö´8Aò‡|®Ò"pÄfN˜8)3yäÀ”4»‰‹ AÀD±ƒÁ`üJÄé_[;”©³ï§ÑY=“28ëÕ%¹š;Q£³s适Šâòù<~¿¢é¤#…è4MP"ºÑTãx[ŒŒ €Ržêf]Iâ°ês£ÚŠö™É8ì&‰G<@ô9€T$°Êd0Œ_‰8-)óù¥€Pt„ÂQbÖ#ö€‹«*IW¼i(b# Ö5 ”@T p(¢¸¼¾Ö6¯˜ou=‘BH§4¤ª%ÒâÌÅ$€b¬Ó;/†ˆ¢µ9MΆ”Áýy]¨¥Ðö%0ƒÁ`ü"8N‡^xdBºLœ!kÄã¬3Õ—gˆÈL5ŽèˆR’ÑJúîòùÜ>¿qíH¤„!@#À8ìòù[ü^Ÿ®F0Š%(ª9ˆ£ÄNTRuxÙKuW”ªDE®K6ÙŒƒÁ`üšFNtÑ&Ô) cÀkl©IT}Q(Ѩ®cd°ÚÒ23"ª~èpYsÈßöSÀaŒñÇs<ÂH%z40‚PB©N8ž7˜ESrâÑ£1¶ÆêÚªƒ¥€ˆbæ°¤¬¾¦îp‰­_†Q)D#'€ ¤¨€l¬2 ãW"N]¡”¶ÓQÊU x,î¦t¬‰®SBE19{ÐÐüñÖ~ýH0VUÕXWßÚât»=>¯? ª%Øæ xýº¦Kf£Õ#H¢Ñd4[­1Ž˜¸Äø¸„„„ÄÄØ´TÑjNv{’ ölÚªø¥%"PBöîË—g´Û) ˜"¤"ä(sz'Ä3yb0Œ_Ÿ8 €Ö5!ä7<‰H爦!”–=({R~ÒˆaœQŠŒÉF!.35Ž„ƒáH(¬„CTÕœ ÍuÕ5¡P0s`GB¼(J¢Ñ`0 &£d6JF'ð¼Ù,˜L±6›ÁbUʶîÐÜà16›Í˜çUÌ©8àŽ¬?X;aòPV ƒñk§#{cÛ'õÀšÊ…ƒF%l¨Qbb3GH–ÙTS"š‚`ˆ±™£‹K€ê˜BÀãØâŒDÂIi©&«#8aDÕ)ÕT]×U«• ƒUÐUž€Rj‰wˆ ñX)!@AUTBŽnÄ¥Q cJ Bs„~0Ô‰ž„ !š¢FžĘc|o4*þ0¤Z¯²»ªÉ—djÖ…°Ù’l ôÞœB{ÿ|þo?óý!6Ĥe ?-oÚìyçŒI‘~&‚@Ãõ;V|øéW›v¬l Qàm©Ù£'qÁÂS˜Ø®/ƒñ˧;.È3rX@H§àש3¢—7…V7yœ•bÈË饺=Æ.˜M”ãtB4] +BiGáè™E]OŠ.`u=ì•P ”Âæó˜A§È`2ÅØ%£ôú)æ5Þp ¶É‘š‘@¢ê è)„Òµ—„=5…k 7~þÆ?òozòÑ«ÇØOv×ΧÏ{ìŸy! 
Øœ‘ÂÿèähàÀ{ÞñÜzçQŸjÞúâõ¯ÿèÍ ·þí©+†š™@1Œ_¦8MN‹‹žbG A\±jm’ñiqVñ¼F”çxñpufB7òDt‚8| OÙŒ1ŒSJœ¬ÑßéÀŒÀÁCª]ȉÏô/(ޝ)Ð<.·³¡‰J’7inmÕB¡X»Íf6D c ”B(%D'cà GB@%^)Bh[0ÜÐR[]ßX×èô‡”„xûiÙ9vŒªk¡0Ça @(â(æÆ:âèáç'a6wÑõ£Œb tÙ#·<¾¶ÊÞøÓ[g¼S¶ØqÍwpåë¯~¸fÇÁ¦ dJ6é¼ßÜô›izýWÍy¦ð˜G¾xùܸ5 î5¿;ç÷Û4rïçoÎõÿókNº{ÏÏ¿øÎÚýÍalë?Ἣo¹"î¸2KÜžëP&Ø›^zú7§Åt¬¸)ëÿvÛÝKÊø¾yöõÂ)÷4Bxßs®ùØ M{iùMµOÝýüÚêHÎý+Þ^Œ{*@xÿs}즾ôåbëÊ¿<ýæšb§"$Œ8÷ºßß5/[rm}ãÉç?XØ£K)ò¼[¼õÌt±3§ºgÿò7þ½ô뛃b|Τ9¿½åšé™‰÷1Ö2 &N=qÀÔu Áa'Pj”œàOMl8@¨Åé,/*np»Kwí+Tmà€~ƒôÏLO‹‹1$Ž0FuufI´[-<Ç×7ÕÑ؄$“Åâ ZZ‡Ëªvì;p°´¾¡E#´z|`ÆŒŒõæf¤©Àq:€Îó*‡5„ ˆþ?tžcþ£F6æìù¼ÔRvÙ«•P½ô}W?2Ö¤mÛ3WÝüQÝ‘µ`CášWïÙPpß{/^”!$M¹`È3O …_í÷3­=„úöQ Àðy§'spø‡"ãÛõü¢Þ«mÿ—·ró’Å; f Ò£M®ÍKÖÀ8å¡'®>-¦KO.&Ÿ~ës8Ûh?qJ:€%ST/"-ûß¼ÿ•µÕ)§h€E³ý®³xÉí¯½ºWµeÿg߬Ç>ûÒïþSýr¤a×ûÜÄ¥~pÇpctx·âß,^çîL]q–|󯽛vßúŸ¿_•%õ”1ƒÁ8Ž8½°¹"ðñiñ1c$ML·Æ <‡À ‰f³‘çEM'6G¬?*Ù_¸µ¨xûþÞ0%k·ØLÂð¡¹yòiÇ䦦&ÇÅ:vì+ÊLNž3XD„ùƒeeÕ­±‰ÉཅŻ÷ì®­wa„Œ,XPÃá`0`LKy.BÀëôhSÉ  "iŸ!$'÷°W)kÁ5c^h7ðîÞT£ŒÍA9ôÖcÑîÛ”ÿëÏÍôo|æ·w/k†ðŽükϹ晓¦Ìúôb¥à«ƒÁiãÍ(þbWž?9ƒöƒ§©•<Ù®LöÓï|æîsû)%«^xà… JϹ —o(‰ž)e™r餸€O›ûø?ævýqí7ýç}qÀY×_œŸÌ‰¹VõÐëÇ) Ž)¶¢½›sé3ï/̨~÷¾{?¬÷Šûo3>±äòA Ü×’r¨_¶dÿ ç™@oXþ§?¯s|Õ Ïß0>Ö»ã_·Ýòf©Rð·G—Íxíât¾ÛŒ±I=ƒq¡7`ÚHGû-8fôYCÐú==(-UBC+Éfè[¹¥qNè€Þ­ ޼)™"2¤ç&ÂÆZ°œvzÿ¨½Œ#’á›rºƒˆ÷à¶öÒõŸ0 CA&æ–¶êP½©ÐsíÀ„n2Æ`0ǧÜÔ„æÚ:£$ŽÌH6 ôÙ†}_}³©m/7!ÕÁ·:9“Ñ ñf6cbˆ#@Âõ MÍÍMℤØÄë˜ÑÃ.¿ÃjÈÌH¾ƒjÚˆ!9]'&³™ãP8R¼žêÆ:UUŒ&cRRR¬ÝjyŒ‘Ùn3Úc4„)щ¯ÕY²Ïçp’øTS|j“TàOž5ô€ÓßÞ]ZÑti¸úÛÿ¼üöŠ-…õ>Ò¤%Lœ;íÛK¡iýÆÚ›sÒʵ[ļ ó¸›'yH» %[;oA†ø$3€¿‡ÁÁ"uè›+DÀчÝÇqƒ¤®:wü¢ÁšhmÚ—ŠÀ’hm7>Mí!J(ÝÛÐÖþ½Ý÷Ï÷ƒÄZµ¨€ºËƒÁ`§g/ªl)ÞÈ&`yÈ€œG§ï۲њhâb bkKuSSˆ‚->Ž7›È#Œ]wz<%å•®V— Jf«ÝÛ/=]ç°šcíV (¡2ÓSãŸ?Ðâr—WV‡ü>‹Å vÄ:L¶À\4LP4x£1¬ë¡¶V³Á` Þæy ÑL©ƒ¼í¯„?)ÚäÜ»³9úgÒdÔÚOî¼ì‰ía19ghf¬‰ÔíØ]¥¥NñæŽÂ{ T¯ÛÞ¼(=üÝw† ŽíîG„ô¬Øœ1ÐE^¨ÝU¸$ÕöýPÕ늘â,?¬Z^ŽÜÜ»¢uy𠂺,ìzãùïÝÆ‰ªF»ÖÙQc0ŒãŠÓŽjwY]«•*ÉqÖ†D¾µÖàªIÎÈV}îªC%•5Õ¼Ñ ™Íc@¼À›¬æø”Œl¡tõšH¸-1™¬Ž¢ZM†D‡MxJ  ” ‚Œ’ ¨D•=½¬¼fôèÑi™ñI 1ÉiVG<'ˆ¦kA]ókJ$èš”“ž›Rá쨩«ªT,©ý}H @N†)h`ß»ÿ)mצÓ'§ áoý+Ú}[f<ýÉ“3b1€^»äò¹Ï|‡ãóçÆ» ”®ÞÕ4Þ³¶À4iÁiÝ­îcsœâ©÷ê`‰Kˆ¿¾.Øs6 §’–oˆ„6½óMSþÉGÔš¥·,x¶"sâì ¯¼ná¸ØnFV½-Ú ÁÙRcªç=ûõ?¦Zy—šƒÁ8Aq:,;ìsÇJ\Bjšßïn*+”`‹ÑÀ„ÔxSM ú¼™9ƒ%£DŒÆ”ôŒ¤þÙqñ Á°RvèpFÿù“& 
ÏÍòÖVðC×°J9¦œÜ”¬‘ãâR2>ÿøSklÜØüI£F2JœâuS„)Pà9j„XGFzÆäÓ'8•ˆz“Hê¼-$IŒº‰ô,ýø¡{>ŒŽ›ðˆß\”%Ðpc¹+z=}°hÈ6i-X_Ù9ƒÕ©5±ãçÆíÞ¥Cñª¯W{Ë,Sæì6ò 3G%îz([·«uaj"Ò²yùãäÙÆ]9;nÃ'­êÎ'î}9ù¹ÆÇwT£Òðí_ï|¶ˆTn^µçÂë/îV‰{]´[sÆgÀ¾R¾µ"4u¤€*Í¥Øîˆ‹‹µˆl¬Ä`0NXœÆ'ƒ<û4 (Ôvn>T][cK²¥¥$ÕUnª«O8@Ï7Fà‘¦ë˜Ì¢‰&±yyf«ÍnwdfdÚ¬vÅdÒ~r8:"OÀsÇs&«y\Þ8·³!ˆ±Ûyëª* ª)jbJÊðñy ¾ pÜw›6ŒÇ:L’¬] !É”?y=ïþ_ýçf(‰øJ·}»½º=HN}ûCsÓx@¢#É €Š¯7V;/ͳã͇žÞÙî.)qi#S£›‰ãçËü®íš¶ó¯€mê¼–î/ö;cVêoÕ軟yè5ûçd„‹—ÿõ™ÒÅ<ÇV'Óèÿ{è‚·}Ö  ÿûæ³?Ëž8vHŠ ê‹6o.õFï²L½ï®ÉŽnƒ úP´Aìþe#_jçG=óÈuS3PýÆWÿðçõ±èÝ÷oÎa«L ã„ÅiùŠobbãÂ:u¶¸jŠö#ªÅÛ­¦Cr›ÆâÃ度†Ã“ÍŒ1‡0§ämó””jóxbc$Iji¨ÅJ@¢}õFôå€RJ)EˆSB!·¯ºÙåINŒU5µ±®†h‘ÌôÔ›t¢i /ˆ(z5…(dôi£SÓRÛ‚FQ9]ÑIÇ‹6úüKŸ”¯úOù>xës]2 }Åß8láì¤Õ6D¶?qÑä'Lï~4á?~æ(~j΄oüpɵÀ1ãæÊÂömjû–&ÇŒyÃL=ößY—ß={Å+[‚;_¹ó²WÌyóóŠ>Ù]#´ÍÅŽI÷¿ö$ç?>¬€«tóêÒ£nH˜vï œ“ÔS¬DïŠöæé':±—zÁâû7]ýøF/Ô.ôšåñ¸¡×?¶(›)ƒÁø1âTZÓÌÕ»5]·H|j| IOÍHNè—–ÂkJMuu«³%¢¨n·Çn5ü¡VgkÀàÞ€!=1–ǘÃÏqX ! €=‰(Šb0šQ¤(&š@ˆÃÈ;Ò“tJ ¥@õ†ºÚ¦zœš–f24BÛ|¾&§#„ÄÇÇ öxGƒOw6cž§¢ý¸I"Ñž‘+OZùíöý‡›°9){ÌÔs/ºl~~úñ¢Ì{W´aU1sþ_>ìÿñ+¯üížr—€­£§ÍýÍu—NLeSz ã8gÌñà7;ÃaQ’ê°ÅƒV¸î »êŸ;}rsuåÖí;J*ª=mm·ßtCvÖ ––æÊòr_[›ÕlÊLMJLˆyhT€%:‡QÐïó¸]‘ˆo·Û! 
ˆFßê„E E[Zkêë}PVvvöà,Ì ë¾ÛøÉç+⎔ø˜眫mÝÿö¾ÓÌ…¼`(%׎dÕÉ`0¿Š‘ÓÅÓÇj8fºË%µ ß´jYUumÕ¡2¯/0 _fcsS]C}bbB8ª‹(oÈg¤v Bbô‡h¨·NimcS«³ÕC(BÇP(PBR4ì÷ùÜ­a¿/ŽT•—'&Æ#N…B±11ÙÙÙ¥‹J+ª}Æ@U[˜˜íX4@ô]† ƒÁøõˆS@tF€ÕÊõŇKÖ|·1èvYŒÒ€¤Äøø¸²ŠŠüId¦ î—¦†‚˜a¢k”’ö× Rà%ÉÙêÚµ¯r‡pX-D?rÀ)j?åQBbì6»Ý6$7'¢S_ WQ] 3ÒÒÒ32víÙ½uÿÁ ÍxˆÚŒé¹ÄQŠ(`óD ƒñë' Ðöy?B£Ä¸¸1có¶|÷­èºVRZj0Hî¶6%F@£GÞѵï/äSž>_±²¾¡)???)9%ìóþðŃQ}â0P@Ïs ’Ãá-.illlu¹ƒªê×ÀåW)œ+!Æš:À©ê”/°ÁƒÁ`ür8Îf˜ˆÄaÌ!l”Œó/Z˜9(Ë`±ZŽØø8Œ§Íënóª:á#D)E]GŠê „tXÍf»ÕÊø<·»5à÷GßË~äah4ˆ(E€9Œ8u{<^ŸO ¼É’:dty@÷Z“øÌl¿dQO0eb0Œ_—8BãÔÔô9óæg ¦ÌÖ§É¡°ÚÐÔŠ(œ "Ì vÉ¡G^¨éêø¼qãÆŽ1¤¦Æ „縮£«¨&Ee&:XÃ<‡x>¤êµN·"šŒ’2w·ø½¶dœ1Xw$û¯cvZ(ƒÁ`üéó6KJA—§©ê®­›#~¯7¢‡t¨­oòBÖx;Â\T–Úÿס?„hýûgÆ:mb2™DQ„#¯º GFO`^À¢±¹-x¨©­ÈQ¸> D1bÿ\šÔ/,šàØRƒÁ`0q‚¨œð¼0qÊéIÉÉ›7l¨®ªÌÈÊnry\mÞô¤8Äqíá騫РðƒdNM!Dï!}ïÔcŽ@2n¨)óª;[Õ]ûë#F‡yÄéŠ#!ÄIàÂщÁ“ó2'ƒÁ`üLÅ)*”R4hpNVv.!Ziqñ^{ÙétÓÁ9žWGA‹.5¥B\$¢6¶´Ø­V»ÍJ ¥”`Œ0|ël»Ì „1ƼXP^ë¬Éy¹†Üq-ØäC|ˆ"0ŠžïÔ‰Á`0~íâD€JITt ²²° ¹¼¾@8l25EÕ"AL D#&ˆ[·ñ;ÄñòèQf[¬¦*FD ‡t]oߨÛ!~„PÀÇ ˆÃ~Ô45"““ÒFEBc`Óy ƒñ+§Î˜:ˆPÐèˆ#èHo4ÅÌ-i®?\3-o ¯ÂX )ÑBªª P$.Ü¿ŸÃ ‰¢ÃnKMJ´šM¸ãEA€P@ÏIÞbkìÖ’Ê’¦¶`L?›ìC¼‚ˆž(Á`0Œ_³8ÑÎð9„tA Ò RN0ˆ’„‰"Bq£'ìøìÓÊowÉzúþ"Ç…´HˆêÊ!”7f´/Òu]ÕÔ°¢D¥­ÍÃc$ )&“1ú‚=ñŒ!>6V$„q4~œF¼ Z,Ødm j[KK?ݲw{£_Ϧ:’ÃHÐõŽÚGQ}W&n>¼¯¨V±§efd7LÔH ÍÙX]º½ºnИÑýí<(ƒÁ8%Å©FEˆÇ ¨ŠÓ¨õ}mõ>I£§Új!èŸèpXL‘äAJ›¶¶ê`ÕÒÕ×7crnf‚ÉÊ#¤|DSô ×ÛÜP”% IDATïöxDQ=rdJJ²Èq!q/ŠV›&™«ÛÂk÷–~²iÏÞ–Pü¸©¡„~Š`V(Ô.N]ƒ24ÑpCáöƒÁäá³ G4ãEƒÙ‘˜šR¾g÷ž"cþÈd‰ÉƒÁ`œ‚âôu£B µY8—³u÷®b—×}Ù•³sÍFeSñá}u^, ŒC’•ç NQ%†Û’ûíß³ñö}pëySN“çA¯›hºÉlÐ?Ãl2´8E0Òu¢aLæDQ²Øy«£¨ºé_+¿[µ¿"œÐ?ùœ <¢5 c•vÄýA§,µ‹Ì Íæ‘`ÍþƒÞ„‘ãÇ $ÐPRR^ï éHrdæ 6Uí(³Œ8bpë–²jâ`+Vë×¾³¢L=: Û˜åH°nßÖíE•- °1~ਉG$GßUÑý%nØ»~cA¥[A†øAòÄœ¶µ+§Ï¹lj2ÏÜ‘Á`0z%N/-ÿ†Çx`fÒÕòæ6£ÙZÁš ÄR @u$’AâQGaÉb2aªR÷ÁWë+‹ö]:kÊùc¬± ƒ®%¥Á9ºN( ÌqFB‚ÈŒa‚—½á•_× kúȱ\æVÎÀ#dFõ0ÑŠ(ÇàŽì×íØáÔ·­Nª«¬"”0ltœ@ÕÛ…sFOJ4Ó@ã¡â‚º²÷3ñc¦ýp…34Èj'^xñØöÔ©êÜûÕÚa@†…­eçò•áô±3/Çù*w~³yÅ—tÁù£b¸î/áÀáu_l¯yƼ!±´õÐŽo7¸€yvÒƒÁ`ô^œšY@`G9`qéâÛß”pöøBmH$€Mˆr‘P$àFMM’×Ïa¤êŠÅ`°Ì9P]úÜKÖ¯_?yÌÈø»Ýj1 <Ï! 
išªªáH8 ×;[·î-ú®àM5%ÛÕ ß_^¬c#^ɯZ~JU@áöS%Nh‡“ê®mÅIcb©?xØcÉΚnB fdgÔo*ã¬< j²jP¡`Þh³G_HCUßm)‰¤M>ol’áÊ]ûÛĬsfŽÉÀ:üôi-u+÷4æ¨ëîÒTkua­f=uÜÀ8À>v´¾÷µWdŽÈ`0}§ ` c‰â¤0’Z]‘èABF-îÆòÝ¥¨­%𫪆" SAW|µõ†ËH”»u·Ç%`ÄcŒ£+F:F”RUÓÇT}iùÒ2S²&4ÅР €(â ÌKÈ‘Äe¶õLô}ê;zÐå{† S¥µÎ ŽI¦Žu%J )6FB„Џ£†3º§èë¯Kqî93‡Ø9Í[ëÔQü€ÄÎŽ‹qýâС¦¿Âu 5û€KJ±¶¿AS²â¡ÚË‘Á`0ú N*†0 ¬§b^Ã<0P] µá†ŠÀ=á¦Jh#„!Šª¢q zhqqqC†©(Úòµéá ªé@)àçxŽ,±±Ä`:Üè$šöºE“ÑŠuÄ#L@Z¤Íës\|ÈgÊE$žtÑ'ÚõˆÙã@Õ&XD 4è ƒ”jî4UÜ-AlÍ2bª}š”&uªÔoûrsS\Þü‰ÑE%ªET õkÞzåèHÞˆÞåĈ¼Ôe7y`âÄ`0}'èÜk‹PT*5…k®VölÊ‹RÒâìV‘ã8ÌaŒüþ€Û㊳K¤IøüF£4!üØìþá6— êªF)Á8Žã KÑ[íò­Ý²Ía“íF›Ý,J’Àñš®q…ìEoEÕI§h¹+ËÛ¨a ‘ áæÚ€)ÙѬG|¥ß¬.T1gTl‡ÁoÇO™39Yè’6Â’Eðw©Mà@Wô#¹Õ#a¹!ƒÁ`ôQœÚ{ò΃0 øÜÃEBåÁQ¹ýcÍBCue³ÛÍóBŒÃ!Buñ¾A…‚J0°}û®o¿Ý0qD®ÕKÍJôö·=a G8¡ÆéYýÍúuß~‰èjÀ_u°Øl1%&$Á`0hµZ’’“ÄÆÇĵ*ZöíL´ÆQARu—9üüø`É*ªm ¬ÆøX¡ª±¦%#7µT–TFìäÕu-ÜZ³¿LM™Ú>á§6ïúj}µå´¹§4yoKç ]^lâh7"‰´ùˆÅÄs¸ÛKH‹7Ã!ws€¤Ç` á†Ã-l͉Á`0Ž‚[¼xq—ÙÓÌÄÙ,ˆËÓ¦r¦$iJYQ¤¼0QÔrSã÷îÞyøÐáæ–§ÓÙÜÜìñx"‘H P" ¥àõùvïÙÓÔâò…#Uê‹èN¸®µí@eí×›¶¿ûéòϾüÚå  Á`0 ú|¾æ¦¦§ÓÓæiiq†BAI”l1N2¸êš¬iÈbWyIÇ_[0¨‘#{–Ú_UˆBˆb1[ ÊÊ2Ô¥q› Ëb¦œ¹ãü¶¤ˆ!JõöáÓµ#{Ubn(ØzPë/éoãŽqhð‹BjýÚwW”)ß¿)fܧÅ@¨~ÿ–í…Í€·$ ž7aDŠéöñWlûfKqƒ_GÆ„ì±ùýj¿XÝœ;ïâü¶Ï‰Á`0:f§z'aG zŽ5 ¡€Hu£høZÃáPôp<@@ ¨ä¨«:¥Uohu5:]F€PéXÑúþy²I((Їˆ®%‘ã¨ês ‘ èxen×/õ¦,†äá£{ vlmë?°_J‚ÝÄ# jÐçqµ4Ö×¹„œüÑÉbtOHyõõ3»K˘:jÆÜQÇTõn/!Cê¨Yç¢ÂHýª`´9æ‹ ƒÑ[q2óXÑI0¢bªGäPL4 ¦#PU•Ú@`Œm6Ûà¬,蚪I¢¨iš¢(qqquuu„¸øøp$\^^ †::ï®2E@×uMÓ(!"‡롯*µ_Þgû0'†GÖØ‰qu••{+ŠT@°h‰‰OÌ“d²c‹ˆ¿ä‹÷×·¦Ž6v`¬¤ºËwnoáÒ¦fšØAI ƒÑkqkÜÙhmó! :Æ! 
QŠ(íˆCè錄Ãá˜4yrLLL$QÅçóB²²²Ö¬Y“˜˜8hР††Ç Öµ'Òñv§Îô(¥„J  bP4ˆŽ¢Oƽ˜ÃзrcÉ‘‘ëÈÈ¢«ªF'<þ/¶äÌ:7¼i[áÚe;uÞ’80ÿ¼ül [ob0ŒÞ‹ÓeY±Ûšƒm¡0 ˆŒ€D÷)uªÏ÷λ; ]×].×¾}û.¸à£Ñ¸víÚúúúìììÚÚÚ¢¢¢)S¦êêj“Ñ„º*Ü‘Å'D»ÐùÞ\Ú¾·©}¤…_>(öDÕ‚¤ÿê”2¤Žž9o4s=ƒÁè¡oîùòõ9±#bññ0F) ÔõݵÝ&ÆH„˜˜˜¢¢"Ç“–––””#FŒHLLT¥  `Ãúõ••PûÂU޼ç0aÑÙ¿·?·ýÑ£b ×çÆ²ºd0Œ_‹8IZuö€Ñq@(EG( €hôŸƒèÅjÍ’[RRâñxZE©ªªòz½@ ''göìÙyãó(PŒqW©k“¥@ %”P ’ ©S8-Ö¸âÌþ"fk6 ƒñËáøÑzi&aÛƒ_>Ðú^¹§ØÒ:"·£Ú€1†nÞ›N)å8Þbµ¤¤¦655©ª …"‘ÆØívûý~„PBB‚ :!½puÔT!íŒÌk‡“Eà†Ä˜.Ër\ŸË”‰Á`0~a ;Û›Á`0ŒŸ%Æ`0 &N ƒÁ`0qb0 'ƒÁ`0˜81 ƒ‰ƒÁ`0Lœ ƒÁĉÁ`0 &N ƒÁ`0qb0 'ƒÁ`0˜81 ƒ‰ƒÁ`0Lœ ƒÁĉÁ`0 &N ƒÁ`âÄ`0 'ƒÁ`0˜81 ƒ‰ƒÁ`0Lœ ƒÁĉÁ`0 &N ƒÁ`âÄ`0 Æÿž™àç‰ÞòÍs‹_Û\ZVãÖ†<°ê­ù‰ø˜2[1Œ_¸8ùæU+¾úvÛÞÒÊzO¤=SlbJz¿ƒwÖ¼3r¬¿°Þ‹^ùÝͯW¾þo/üv¨ "Ùâ¦ßó÷é¤éã«f?q°Çí*ÞøåŸnxx-ÌzøÕ?™È1ƒüQÿûö;ßÚYï'혦üyé_ÎéZÛ¤uÝ⛞\[æêè¶ìý§ÝùÒc³S˜GüìʼnK?}ò§WUjÒ _vïí£§ÆYÕï¬++Ú¹~õŠUŸnY³ÆOËÉaŽ_¾òÓ}~{>ù²òÊ¡C%æ3?¯ßm{>ü²NXõÁž;få@Ì$¿<ĬEÿø|u¯¾vÖ{‚žxbµÜUžpÜŒG>˜ñˆZõæe¾™ýU噘ÝNqz5Ì¡ÂWn¸òñU•$ûª,_òô­—œ9nÈ€´Ä„Ä´Cóθè¦G^]¾êá±&ªéôçmÒôÙugÞ²Áßå#)kîåùɶÔÉWÌÈ”égŠwÕ…ÙGöEW³£ÿ†ÃüOÓùŸ5“S YÄq ç²³;ä©YÿÁ=˜C€8þ—_G¿q¢þ=/Þûê ùýõæ¼ØcŽƒ9ÇмTN#?óQRŲ·öx¾WCö•/­X÷ùó—eNêOz-!ÌzŸöû%kÖ,¹oJ,þï8Ìÿ0ÿa39E’E¶17=|aòô+©£_‡8é «žÿ¸RÞ|fR·3´Bê´ËŽ‹ÿYÏàö¾óQõá9š»pÙÓן¿à‰~Ê\9ÌÍñNéfr²’Ŷ±7/ž—p²åéçUG¿Ž;¾Õê×}\ 4óÌ=-'Iç\>ð{¿5š·/}í?Ÿ}·»¬USòм3\sÍyCmˆ{ÝÝóï]ïtû‡/9¼üãÏWoØQæÒùøçÝôà]ç4¢pÑß~w÷Û-Q³¤äŒ™÷§'®É–”Ãoÿñ‘w¬m#`|Ö­Ï,ž›. –­yã_ï~±µ¸) %äŒ?ûò›®;'Ë„ô¦Uþßã««Ã0ø®Ï߾߷êý>YµÉ9ùéGÒ–=ûöºC^ØvÇ´±Ù÷~þBÚ;¿æË½µ^ }ïò·¦p½Ëö‘R¤n㻯¼½rÓ¾* ÚR3“”²CN”}Í«wµô0Ñ9ôîcýrß¡òZw°5}ÔôKn¹máHûIè Ën¹øÑíA€Ì—¼<¹jÅÒÏW¯ß~Ø¥‹ Ãθêî»/ ›V~¶rÍúmûkýTHüÝãçȲnÒ{þðè­ßÞ3E–e9êô3ç^ºèÆßý¦Ë²,Ëòøó/:+?æœKÝø»›;JôÓKߪTº˜þá™íE nóòk'œ~φ6Ò½szs¡,˲|Ö5÷?ñâ¿þýÖ›ÿ|âÖ¹y²,Ë“nXRé¹² ‚½v0Ji¸ð©³dYžrß¶@g{á'TkZ³ø|Y–§^÷Š]eõ5‡önúüŸwœ)O¹g[€jþc;ŒÞºö÷3dyú-olüÖUãdYλfivÜÊꥃKœzã'ZÝÒë&Ȳ<ïïÅ]E¨Uo]<ëžö”Žá0jõ;˲|Y—JÒ>¾aÑ[U=¨É1¯÷EÖœ¼LðîiT{LÐzúóKî8ºŽ±1"x}ÚI=#Ô¥ÙçÎîA§Gí4vŒ=oŒÜÕî#—².˜› žuK÷ù»L+mø¤Ø>sÁps_³! 
ZpÛy1úÞ¾®Óz®¬ã`½ñ“HÉ’×w«€F-Zcì: ž<጑ÉR·MAm-kJh§ñ¸¤óŸyvAZÄ~L{mÞ“Vƒ½0éI³L¯×žìnûÓl7<ùÄWݬ=l;ŸäbÃóûÒËýÂ"H{t8B¨÷â¤ÔmÝí€ÔQGG¸ I#sÍP±±È×}Ü 2X$ÐUÒaiœ0íÊi&(zoE•Ò‘µ–õïï’¦]‘ïÀjãîB/ÄgÅ ]z[Š´æjO7+£ÈûÚ©€àîWnž=cÁ­O½ó]™÷~„œütlóžœìIO–eú(O“îxèl{Tž?†{Ö°D#†Àæ;fjú„N¿$OÜ·UÙöÑ×ôsãIýºÏ*3ç=ÙïgIo"A…üdÖ ?‰(Áhú¡ñO\âY¿#½þÜß—¬¯ WmþèùÍ=ŸzÆýÏ>tá྄Rý-è˜æ=95Ø›¦w’,ÓWuL¾ó¡37ßµÚÜøäã_¾í§¶óO]ÌS­—ûŽœ°mØé”ƒÛj"½N5: šòý##¨VD³Ø×šúŸwÅ( øöM­$|èÓÏ›†^vnæ‘ÊM"’âRÓ¾Oj¢õ¿Æ­ØÞÓÌ^þËǼ: U¯yñ¹ °N¿ýê!=ü4UJßûç†XÎüý=sG$OÝã ±cÂ%“Mú®¥›œD­ýzymÖ…gœð2moz‹„2ë…Ÿ`ƒÕñ÷}£4’Ò§ßôÜÒoV/yîîKòS þë'oy~O /}ÜOÒ‚ŽiÞ“Rƒ½kz'Å2}wÑØ)w?8ËÁO>þ½}O?…Úbžr½ÜÿNœ€O;㢚¿^y(ÜÃzÝû×/z£<ú;$~P<€¯É÷½´ÖV×(q`lŸ%'NÿÍé&uû»k|¸Ú?öŠi]v·Ïö‚«¢U=U¬›0ëϯÝ6Rð®úÊãóòò¦Ìÿãë7üuÉcg÷´õ‚øÊ÷5@êÈ~ÆSÜmÌÅ3ìtßÒo«+V¯h²`Zò ϯGZ«= ¦ tôØÊ~ŒƒõÆO„øÁñÍ%Í'êKBlöé—ÜýâGïß:œp~ýYI¸/3?I :¦yOB ö­éý8ËœyÝ””ãLšüôÁ'Ó°‹ÎI€ï¿ñÆç® 'ÇŸð@/\¹ù€‡NÏî9RìÇ8XoüDH{šZ¾]]Ö§.E©\öê×]J‘aÐù—€'Ü—1ØOÒ‚º3oO5¨{«‹Ö{Î{oLz¢–éUŽÛÍÅM»÷3,ÿ;¯˜ˆ8-¬žPÛ>{¹ÿ82ø¿çîg­à©Ûž[ß|L}¢þÒÍÅm ´ÏÜš†]zQ?€ð–÷74q+êÛûþÇÕÒÄ«Î<±™ÓЋd@[qµíœKFímæÍŒ€ê7ŸZZî{Õ#Á(ñ4ùOÊ &õmæÚ?|ÙìÈq×6y|Á ÏÕT__ßÐì öSƒÍ)if¨Þ\Øy˜—æužôò“ƒaðÜóÓ zÕ×mòÅ'ªMĵå¯Zl³MMÀÇSÄw°Þø‰qèå— €Æw¿¼Í¥÷Úaˆ·hùk«ª»tº¿Ù)#’ž8ÞÉoA=˜·»$®u\0ÿª+æ,xr§öÛ^˜ôx–9fë;fNPžfÜû‡iæv¤ÞvÇ+&gIŽç lÝn× õ2?¶—ûÑ;7 ºôù% /=øèûÞ9wçÌË/™=mlnz¼UÐ|ÎÚŠ›×~þéºÃAÈî\V³®zòŽ‚ßþuË÷?­Ý{ÙÄLC zÇ'/>ù‰ÎþÓÏL8ÁLèwþ#_}|°`þàï-Ú ‹|ËCsvÜùyã®g/Y¸yÞ9ãƉšßÕ\WÕh¿üá[GgŠLH1̕ޒ~ÿdÛ¬LäÓͽbjÒ o(Ô}øÀ¢¿Ñš}îͺo~Žù˜ÉsçLµ¯^Õ¶õÁëîÞ3­¿à©,زßÇ!ÐBA‚x*S"öŸ=?ëî± IDATõƒõô…cúô;Ó_SRVÇÇ›Eâ­Úöѳ­óCÆ‚GïÊ?þùá?ÂÁzå'âÀ+Ÿ¼wï5Oo.çæs¿wæÔÑÌrúõ>p¶iÇó÷ÿe宪6 ÆÔ‘çÞþÔ}3º6Dêùöî9÷»ïXöÚ¼d|¬_.V¼ñÚGkvh €“9lìÔ³/¼dÎØ$‘´¬{úþÿoï¼£¢:¾8~·Kï ¨4Q°,"¢ˆ E[ìšÄØvý…hÀn‚-vEQQB) Š(¢ÔX`©»l}ï÷‡ .X¢q>‡s8çqyïÍyï;sg澿Ÿ2¹4­>ƃ\·yÎéÛÜäÅ•÷ÿܸÛ?«Hʃì§ÍT|Ý/>;Ÿõ¦=Ëiöî7r‰‡sΡ=ÒÝ6ψ8äñ¿ÀüöûHÊ.Gnï²mÿmŽ5d]óÚ}6:¯ˆJVÎ3~úyTÉÆI»²4ǽ¶†wÆóìƒì×¥õ•õŒMG,X9äéñ m.ñÜàøÞV}ÿÀ–?Ã3Jh:&Ç{ìùEóž×o¾IYŒ ÈjF,ðÜêXufÛþð'¯Ê¹ =»¬Ý³qtëØV¸x’w©ë™àƒ¥ùNV}wñ8Ï, iõTkdVr1 j˜Ø¸Ìùu‰‹‰Äª¬6°woØçû]ܨj›[¯“vÒ\!âšÌ ¾·c½,ã` «fhf5jâŒ9“©“Ûi0î+V:5ô{ü"·´V…–£¦,Z6wd§‘Ü÷γv¦¥QÊv˽ïýK»5È{å»fÙÑ ™Q[Ní›Ö‹ÒùÝvîRqU’ï‰N=ÓÞi©¹íÝÀ{±´Ü k×]H-{';4-s×½§6¼ÿžÆXÿx¸ïÈ}:t×ZRGuÔ¦&?XLÀØ©gwïõK`pªi2Äf´Ë” †V,¸P  
îèå3.ý€O§-_ªÖûíƒ#>¢ªf9/ôÍm•óJÌc=>»È†N§ðHæü' ŠÕÅop˜xð¹´¹½Þå×™TñÙó­¼K_´5•ó½´»n¸·«5ˆ@üÛ鋱‚q}Ã:ÿj‡5î­74e4‡LŸkAàÕ5ü'&5±š´€ )S»ºõVÔôù7 âo–•|huúéÝÛíD >Hœ>Ÿ6GdadS;£÷]¢ú²:@ËLï¿!•„_Ì4˜9Ñ Ëknñ/0¡‹ñêy ÑWƒò½5A©ÝÛýD 8}{®¥Ò¨¢ªâ÷2ñ`줳—ò€`6Ë͈úM–M̼¹rcä›åFÆmïSöKÇ꾪\¿+™âÚÜ@ƒ¿í/a~o5ˆøî!#|.Hºc:žÛã³þX>ÍÖDW‰"j¬.~™éñvfƒáÔ½‡~üv{ªXÍÃãÏÖ ¢ä…_,µ?è=T±+3±øç:áÂÊ”Àè=û—œ¿üh#—MÿFûÝ+½îýÈD >3]Z­‡è*¢êŒ ‹~!÷Ós˜ov iZ½Í­ì'¸Ípø ‡™°Ê9ÛãÙT+÷µ;W:KÿÍA®ïÚ5§R+Þ®ö”Ó4¾x¿×týOê ŒõÏæE¿ÇVªe9áç-맘оW¯”îý˜D 8!â»Í9!‰@ HœÄ @ $N@â„@ '@ qB âßæó¤/Âëßé™]Ò 1Ã7xSÿï0Û±˜ã³Ç/éifa=@ßuAWæôü޳E‰+c{žMz•W\#è·-Ìwšê}.âb÷nzù<½¦@mÅÂË>=>æ—äœ^‘œú°´˜ ÐßéÒ£a=P†XÄ·)N%«•>—§n¿<šû9oç>?½rŹBã_Žù,1ÿÊÔ´×ÿé°0|ÉØ™¨¥‘46wÀ*nÍŸ¸çå·*°¥×æ¹]µñ½µÚô‹çúÁ…Õµ¹©Ìü—5eÅ\ "Y^SA«¯Fß¡z¦fò’Ÿ™Síà•:"cÖ‘‚OPw=M—›Î¾êë>¿´ÿ.*ÈØjžÑWBÕ„õö$DF—ædUW×ã@PT4´î=úW›©®4ÔiAÀ7ŸøUz;³ƒŒÀ ç™›£Ñ >§6•Ý¿“¯5Ƴ÷V&Qëñ­ìä Ž¬¡®Ù`CÇIŠÊJT*44Õ0ØJb|rc{è^ØßBŸüÕe°ï.ŒÈàµ>ÈgÇýù4Y×Äíw;ëáêªdAYò ÿm.ÜËüg‰»Ï 5”ñ‹“ŒÑ”9Ãã/ÍÜGB™°Šà¥ó¢çµSøÀ ¤·D|ïˆÊâïäiñ4ü‚Ú„‹Ù³B.—rûŽÙnÛ¯¥•ö¨ÑÔ ÔìŒìű§³îy×Õ¬±Õï«úP·˜q4øD"ÖÎ_(4çS®KíÞÆ õL'-žRvîÎq×å;&È¡éß;ßúZÖdÞÑ»1!ýh$+ÑW+òͨŤéÕIm‰ø€‹xünW¶¨,.$_{̸/§M¸¨""íÊ¥2êXëÌÍÛ(S Y}ýñ›‡Z¨5¦Ÿzöªî+ÊäÌ–ìåYoé,ßÎßä5†·êËZZ8÷~Êùò&ÔX_@œ„/Üâ<õ»ÉøÄ–ˆo}ØS“´ÿ—Iî{5vïå-bÆÝ)Ðûå´ ¯˜ÄVgí6ECþC*AQsÔ"}y^E|Dè_~›¯òks9?¹þìøÞ÷ž ·çÎqÒi}cy=u^I=u†õø¯ÎmÜr9‰Ñø¦c£ÕgÀèe»=4ÛyPø/ÏmõÍzÍ` AÍhÔÜÝ{ç¿»„ Ô½Š мŸöªZHTë7þçí›ÝÌä‰ÐôìÈò ~ÏÞ|d4mfïÞ3߈ŠÕ¥üµù`DfAµÈZS·Ú8R­íU1vâÁMþyZR˜lºsy†.‘“uñw¯Ë1¯ë eÝh+€wjÃÆ;·°RÎ^ ¾ŸžW-šŽ¹µ³ûâÅ?˜+µº )ÍZ#, ܾâ`L©@N×rÒ–C#Tß3—­šùG*@ÿ׫'GÝ ‰ŒOÍe‹©šýç{xÌ4‡‚ÄÐàШø”g%8Ek𴵞kƶþ*ÆÍ‹:êJxò‹ .€Œ¦é°ñs–ÿìbÔùÒ\ÀL¸xì\pÂóŠ& P5tõÍFý²sÕˆæ:èVÁ;ÂvzB¬&ÆcÚ¦ø€¾ko’{çVHdBZ[LÖ°øaùÎ “úH„p~éƒ+§/‡&fÕŠ¨J=ôµy¯«&‹Ïl°RèÖ«ZÄŒ»S¨=n·„6aœüØ«oD>|VP# É*©jêX¸nÞ1£µ 6í‡ÃXEa~•b#óÉ“Ôd$îX\Ë~™Ÿ•YSY%Ä@V×mï`}YjŸÞƒuÒJ*§¨èJœœ, ™/oÿý4öNa^¹˜¨¥ã´sª¥:4"`¥Ï6™ÜyÂ0öoÅ,ec §h2aà^ÃGQ:v—˜y&hóÚœ2>€¼rÿãwýÕW™ØÒˆ²ÿº!²:´«—ÂÉöÔ”ôÞ‰q±@@—‘Ak"¶“%GvÑI@sØëýä¦feä_ݼ|W$ëí'ÈeÌ–¾äï3C‡b¾Å÷ÊáùÆ-GyØÎ¿ù¥U+ ³·Ö§bìì°}«¥7âr«/œ¬ îþ÷ík‡æQ€¨²àjáOHTu<îmK€¼ókÝíŸQ§d:Ì~°QTõ,h×ê¿_´L¸ó^_^6}݉(¾ý¶Ëñ 1!Ç~6c¿®ÂA}Ú–E»÷Ö1cïéŒÛ¬MbVôîÙ36ŸŠÇíÖ»q'4ÈÿÌþuSzW=Ž{T%ê‚M.áçÞÌ)Æý¨¯B–¨Ð'/®íHŽÍàËêXŒÐu¥æG$«o. 
lfëÈË/dÓ‚”Œ ™ÞN&ƒMÈ«Ê{~È…N§ÿx­DÔ|ŒŸstÒðyÌwG8)›íètú¸}Y¼fAÑ•yVt:Ý~{§õ©èN¿?æH^·"hÑ0Ç©¸»Ð…t:>ûzóeù¯O¦ÓéC—Ç7| híZòsþžF§ÓG¬ g‰›b öŒ§Óéà .uŠǪÃÑétú ¿bá›;®IÚãj7ûØ“zñ‡\ÏMÿÍN§;z¦s›¯ÀÉ:>N§Óí6&Ö·x}»C««à8Þ”uh"NwÙ÷¤Ù­‚¼ÓSétúðe¡¬/.døÍ¤Óé?Þ`6›ˆÊn-]ä[$ìRÁ;¨¯ò€9t:>÷V…¸ko¯9aœ,ŸÉt:>¾ù ¸ìö’¡t:}ÖeF³'°š˜µ¶t:Ýagï îC'Åç‘Ò€ŸmètúÔã/¸’fE¾3ÇlLáHmÓ"æë‹KCž(ojyâ°Æ'OϬNˆÍà1ÇqQIι¥¡'ÎW p ¯%>8¼4**÷mMŸyÀ‰ä5}uiËȯ°ör"yMž[øö®ÄýœH^?ûÔ´T—uÝÆÛ‰ä5ÖîqÙ;?Ö\¹èDòr²L.}sI1'mʼnICb³j°6÷± þê½ï—Ý<ÇqqÉþS­þ±£Rç§þBòr"ýí›%ˆ÷#‰ªCݬ©Ü”à̼¹wz7’9A±Ìw]+Aq̽*c×Ú­¥ôédLïíÉdŒ\§è@mL@fcK/‘•øBÙÉ}€ü—knÖÕErÃg”_,gN7¤]+Joö^W€÷ÚÏcûC«?Ž-ëB^b΀@3™0ѸUµB¼¥Ž¬~" 5ŒšwÕÁ}~ýn9€æè‰f´ïÛ U¤‡fu8ó"¬Î«kùD2I{ÒCîzdè~Á?Úáí;†fäl¯5%oË7¼ˆÌÄ4‡ Ñ"·ÌǘZë@}VF¹°;BXr/´¨Çø±o)üœ«çÒ…@¸ÈÝTr:…¬cãl©ó& 'MGc×ê‡Ål 9¨Ëš' Êïù–©Ì D{³Z\PQS lL“ˆÌˆd".îd†ª2ÌM8 ¼N&ßä4'í·T'=¾ŸßÞépaþá[^‘[Ãìû«´. ÖôhcH¤æ¨mZ]ÙÛ!.¾œž 8ÃɵZHŽjAAÅjªµL+ujzSMÈ ¹Wúæµ ,‹b»ŽøÀ¶EŽ ‰ß=T7hLH¯{P\;GcüTS¹/ëAirz=ôØ«ud¢mi&ž7`Òš½7po÷ªs¤åÇ6RëvL(£ ÓÆ@ ÊSao ËÓ³ê@ÃHCb3?YIWD,F­¸£îƒj/UxuÜó\J…[iŠª4RüóÉÞas’U±ðm¹1Ne-r*²DI+eY€¦š¦îL´ KbB‹zŒ£ÿV›% ‰,è5¬›˜,Õ䧃ëÊIiÓQ‘[ð´ (ª&úͺƒUF¿Ì#õn%÷î\âú\2:z’sB¸ A@–ït6E c® ‘³01¡@Õ£Ì÷U «º¶Å›¸8lÜpí6×ÂkâößTYry¨AWbsâÂgÇ÷WC[|Œ”Ñ„B:q‚}Úp9 u⾊'LزÐòC¢B•ÅGT»Úv'% YÏy†% )) íͺnQY\Pžî¤É}¿ô¶ZQUn€‚–b›ÞYYO ª°Z$­Y«ç¹!óÔš­Uœ‚̦ϿÚWXèÈöžhmՌ۱_ÔÑ-{NüÉ^€›~zÅDG÷ÕûüîçÕc]ôÏ'w¸ÔíYVI€WÏÃ$gµj›ä5ºÑ<…%1¡Åz-Ú¢Ê×,•^*.(’Ʀš,m æ—;¿öIR“¬…^ËHUð€ ¯§!é5¬‘Á²‚®ê§x»Ë(ôÔ¼:¯©uGoLŽß6û9»¾*§Pئaå9G–¼Òß7i²IF?xCÙÕáäÞ+ÿ7LmpBH-N@PE"d¨‚Fq+ÿ—<ß?-3£pþù5ñe£ä 4ÄþöhȘM?©J¯È8·*xÆUß\Ãe î®ædôNFtEœ€ h9m¤@SrpfCýÓÀdªãdSE=§©fP^Èe%…—ôu¡ÙÍ.QÕfÖH€øq@b&,‰¾Sb俬÷å[*ñMx Dq›7".ä ¨òT‚´f4æÊÍ=qîè–a2Ì+\Í|æ’PiT‚Œz½¶ôÐRì̵™žËÄF^=ì1k¸.0£÷®ú+ƒƒw£àŸÆáÒÛpꯣå²O¼•]/ÀšQG'ðAÑaíÂ~]‰ ‹£ÃKô\$´ ˆ²Š²üÆŽ·ôJcÓ¬5ñùTe2±yÜWÄnM‰AÖXÏâIWE²EeÌ—U“1šŸ&Ç$.jâÈ(µN‰TW)»þGŸÐñt9€Â”C±š[²èÕÓËwyܨÐ9²ÞÎäæŸ½ ¶V<¿7_ÎÛYéÂÍW-Á9¬Ûn—Ž¿è»1eº› š]'‹)vJMÉ·n¥Òœ0’’Žý4 `†ßI‰ -6šl«ÙípAiÈLGe<3 ŽQy·¢ŸûhaZ”¬ÑW ¡¢¡Í¤Œ¨®´Z}Ô(ÒšI ÿÓoóLtÆmZfNÈ;ã}»Xô9 òvêØÕÂîžBÍdÔ,#7ýW TEçðºQðOãð®ŒÂÇì>»Æ’R¿žã0kkk»i;—þyÕk¼V×[”€^ÒÓÅYB›€¢a¬ÀÊauè[il:R§79] -JÄäå%öþˆXìZECùÏàü‚;Œ&SÓᦟ(a8¯±´ Ⱥýi­z2Æ#7nЖ×7_ù». 
ðð°Üw› pìÃBÌÞMvá5%~.—ÎTXz¥LgŒÆLˆî‰È˜j¯ ÐtÿÀþD¥±úP€¨eç6˜PqkßÉÜ>£M@ë?ÝE ²ýÏŸa[Ì©ñoL‹R{ØÐU€ù”Ñ:ƒŠ°üÉKÙ›+¤5“|YѨ ¸o]  Îü}í~Ѧmõe9Œ´À#{«@süo;ƾ•`ªtfíI¢"}ÙfÇÈM1õ‘{Nº ÙBWø,‘v‚}Õÿ&§­)|hÖŒ¤©.Cû¨SElViQ¹òœßW[v¼˜¹)÷¤Ç.ʚَ溲¼ÒLJO惒㯮†”*xGÕþ)OH ËQp(½±mѶT4™°â·ÍÓLå‰À/N|ÜÀJI­Z)¶¿òFPQÖkr[m ö™·wÓÓÅû“òýVLˆ:Ö~PMy2ˆ¸5¥…•½î˜o"#•M»EWî­ eÌŠr޾’"€¤d$õÅãN^ÇyMD½‰ƒHOŸŠó¯e„‹jêT{9oÐûãéqò* óIjJ$¼†ýøï¨Ã|0²õpŸNžD{ûUS³¿Í‹]ÿƒÝøÊR^JTt*6šãÞªÞ÷Ú3Ð^•·ÈÕ §¾oHžžžR ™ŠjùÝ&·-‹‡¨›cêò/oF+-Þ4ËL¾¥‹+cö­ö8ñ-à>¿™ÌP:˜”ubÝš}·r›0€Ê‡Q‰EJÖv¦Ù؈ŠÚćׄ.[Ö8ë~(rŽUÅX·ÉûX`ªƒ‚£’‹”‡Ž0RÑ`Øø45‹ÉÈÎxQ&Ö°eg¡Ýî®GJ{–²Tu˱«p_ÅÝ8{òÌÅ+7BR+Õm¦.ÛåµÔ¦å;3ÊÍ…×·.Û~1» ¨I‰|Xoæ4\Og'ôôͪp³ïÞº—¯dc߯m0«¾¿DZøJ?':<.Ý{Ô Qø®uÛåóp€ÊÄðÈä|¹!¶zù'=Ö{_L®ðrb¢Ò*tlm i ÈØý`§+f³*ò3¥¦$%¦½¬+›Ú»Ï¡¯@ê(Å5‰,®¯e>‰¾qé¢ïe?ÿà”J‡y[÷¬ux«ïR¼=Ä•±‡6í8p2¤€T%Þ ‹K)VjÓ[žøÁâõެ^s$±Z²9õ§ü¹vá» >0ã#“+´mGôVÖ¶²3¨M‹Ë®y¿+/¨~ýà^…ÅT‡^2]üitj…ò¨•Üt äù{_bOÚ´ØJõ=’jÿ±®6ÚâÚjVþ³ŒôÇ©)ÉO^³øJ&£§»Ùô|3¤‘Ʀ½h™Æ/š]ÇÑêin@!AFGAœSά%ÈêÙ.¢; W¾*~Î‘Šºj¿Ic'꨷ÞÛ$fæxë|GДö2.Š­èhÔ‡Ä^Ÿ°qÊ>Ýc·6 ï"º‡¸:ñÏ•žÏ]Nžœ/±KãW>ñÛ¼âïL¡¬ÃñÈÃh_uðúò;ÿKÏ£èMñ´ì-ÿ™ßÑ'r¤ÏþT0=½êØbE$ˆ¯…¯jäŒÕ¤dhL™îÀÏ'IDATjŒ” Ñ=„ŒëÖùW;¬qo½›(£9dú\ ¯®Aøµ—‚ ¤í0S‹ÚPv¡¸æKÝ­ˆ#F=$Ní?%á3 fN4  zAtO›Š#²0²©ÑûC#Q}Y e¦'÷õ„ h3pêeQVÖUŸW…¬›Á„•)/d—ò>ö’_Sø·?Ó.fÞ\ó§ò®}cÕˆ `Üö>Ua`¬Êúˆèn_‹J£ˆªŠëĠЪa줳—ò€`6Ë͈ú-…@é1Éfžvv˜_îí­Åz# -èš= iD\ÄáÕ2ë˜/Y9)¬&ƒ>¿ôÖC™@ qú¤`5<[3ˆ’~!°Ôþ ÷PõFt’ŽçvÄø¬ÿ–O³5ÑU¢ˆ«‹_¦Dú_¼Ù`8uï¡¿9¤6lÀKƒ¼øÂÌÔ‚ˆ9’cMUƒºV+Í uÈ÷Èàm~#_GûÿWD`•s¶Ç³¨:Vîkw®tÖ£¢:A|¢êŒ ‹~!÷Ós˜o’NiZ½Í­ì'¸Íp¨ñ ‡Œ1qSŸ'À 2MY†ú) ‚¬‡;&ŧ¿Ý†%ÛCkèÖiÛ—©¡l ˆï]œh´Ï @ Hœ@â„@ $N@ qB '@ 8!‰@ Hœ@â„@ ˆ¯ÿõv¥,×7Ø;IEND®B`‚pytest-2.5.1/doc/en/changelog.txt0000664000175000017500000000014312254002202016261 0ustar hpkhpk00000000000000 .. _changelog: Changelog history ================================= .. include:: ../../CHANGELOG pytest-2.5.1/doc/en/Makefile0000664000175000017500000001275212254002202015242 0ustar hpkhpk00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . SITETARGET=latest .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest regen: PYTHONDONTWRITEBYTECODE=1 COLUMNS=76 regendoc --update *.txt */*.txt help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* install: html rsync -avz _build/html/ pytest.org:/www/pytest.org/$(SITETARGET) installpdf: latexpdf @scp $(BUILDDIR)/latex/pytest.pdf pytest.org:/www/pytest.org/$(SITETARGET) installall: clean install installpdf @echo "done" html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pytest.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pytest.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/pytest" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pytest" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. 
The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." texinfo: mkdir -p $(BUILDDIR)/texinfo $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: mkdir -p $(BUILDDIR)/texinfo $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." pytest-2.5.1/doc/en/attic_fixtures.txt0000664000175000017500000001552312254002202017377 0ustar hpkhpk00000000000000 **Test classes, modules or whole projects can make use of one or more fixtures**. All required fixture functions will execute before a test from the specifying context executes. As You can use this to make tests operate from a pre-initialized directory or with certain environment variables or with pre-configured global application settings. For example, the Django_ project requires database initialization to be able to import from and use its model objects. For that, the `pytest-django`_ plugin provides fixtures which your project can then easily depend or extend on, simply by referencing the name of the particular fixture. 
Fixture functions have limited visilibity which depends on where they are defined. If they are defined on a test class, only its test methods may use it. A fixture defined in a module can only be used from that test module. A fixture defined in a conftest.py file can only be used by the tests below the directory of that file. Lastly, plugins can define fixtures which are available across all projects. Python, Java and many other languages support a so called xUnit_ style for providing a fixed state, `test fixtures`_, for running tests. It typically involves calling a autouse function ahead and a teardown function after test execute. In 2005 pytest introduced a scope-specific model of automatically detecting and calling autouse and teardown functions on a per-module, class or function basis. The Python unittest package and nose have subsequently incorporated them. This model remains supported by pytest as :ref:`classic xunit`. One property of xunit fixture functions is that they work implicitely by preparing global state or setting attributes on TestCase objects. By contrast, pytest provides :ref:`funcargs` which allow to dependency-inject application test state into test functions or methods as function arguments. If your application is sufficiently modular or if you are creating a new project, we recommend you now rather head over to :ref:`funcargs` instead because many pytest users agree that using this paradigm leads to better application and test organisation. However, not all programs and frameworks work and can be tested in a fully modular way. They rather require preparation of global state like database autouse on which further fixtures like preparing application specific tables or wrapping tests in transactions can take place. For those needs, pytest-2.3 now supports new **fixture functions** which come with a ton of improvements over classic xunit fixture writing. 
Fixture functions: - allow to separate different autouse concerns into multiple modular functions - can receive and fully interoperate with :ref:`funcargs `, - are called multiple times if its funcargs are parametrized, - don't need to be defined directly in your test classes or modules, they can also be defined in a plugin or :ref:`conftest.py ` files and get called - are called on a per-session, per-module, per-class or per-function basis by means of a simple "scope" declaration. - can access the :ref:`request ` object which allows to introspect and interact with the (scoped) testcontext. - can add cleanup functions which will be invoked when the last test of the fixture test context has finished executing. All of these features are now demonstrated by little examples. test modules accessing a global resource ------------------------------------------------------- .. note:: Relying on `global state is considered bad programming practise `_ but when you work with an application that relies on it you often have no choice. If you want test modules to access a global resource, you can stick the resource to the module globals in a per-module autouse function. 
We use a :ref:`resource factory <@pytest.fixture>` to create our global resource:: # content of conftest.py import pytest class GlobalResource: def __init__(self): pass @pytest.fixture(scope="session") def globresource(): return GlobalResource() @pytest.fixture(scope="module") def setresource(request, globresource): request.module.globresource = globresource Now any test module can access ``globresource`` as a module global:: # content of test_glob.py def test_1(): print ("test_1 %s" % globresource) def test_2(): print ("test_2 %s" % globresource) Let's run this module without output-capturing:: $ py.test -qs test_glob.py FF ================================= FAILURES ================================= __________________________________ test_1 __________________________________ def test_1(): > print ("test_1 %s" % globresource) E NameError: global name 'globresource' is not defined test_glob.py:3: NameError __________________________________ test_2 __________________________________ def test_2(): > print ("test_2 %s" % globresource) E NameError: global name 'globresource' is not defined test_glob.py:5: NameError 2 failed in 0.01 seconds The two tests see the same global ``globresource`` object. 
Parametrizing the global resource +++++++++++++++++++++++++++++++++++++++++++++++++ We extend the previous example and add parametrization to the globresource factory and also add a finalizer:: # content of conftest.py import pytest class GlobalResource: def __init__(self, param): self.param = param @pytest.fixture(scope="session", params=[1,2]) def globresource(request): g = GlobalResource(request.param) def fin(): print "finalizing", g request.addfinalizer(fin) return g @pytest.fixture(scope="module") def setresource(request, globresource): request.module.globresource = globresource And then re-run our test module:: $ py.test -qs test_glob.py FF ================================= FAILURES ================================= __________________________________ test_1 __________________________________ def test_1(): > print ("test_1 %s" % globresource) E NameError: global name 'globresource' is not defined test_glob.py:3: NameError __________________________________ test_2 __________________________________ def test_2(): > print ("test_2 %s" % globresource) E NameError: global name 'globresource' is not defined test_glob.py:5: NameError 2 failed in 0.01 seconds We are now running the two tests twice with two different global resource instances. Note that the tests are ordered such that only one instance is active at any given time: the finalizer of the first globresource instance is called before the second instance is created and sent to the autouse functions. pytest-2.5.1/doc/en/projects.txt0000664000175000017500000001117312254002202016170 0ustar hpkhpk00000000000000.. _projects: .. image:: img/gaynor3.png :width: 400px :align: right .. image:: img/theuni.png :width: 400px :align: right .. image:: img/cramer2.png :width: 400px :align: right .. 
image:: img/keleshev.png :width: 400px :align: right Project examples ========================== Here are some examples of projects using py.test (please send notes via :ref:`contact`): * `PyPy `_, Python with a JIT compiler, running over `21000 tests `_ * the `MoinMoin `_ Wiki Engine * `sentry `_, realtime app-maintenance and exception tracking * `tox `_, virtualenv/Hudson integration tool * `PIDA `_ framework for integrated development * `PyPM `_ ActiveState's package manager * `Fom `_ a fluid object mapper for FluidDB * `applib `_ cross-platform utilities * `six `_ Python 2 and 3 compatibility utilities * `pediapress `_ MediaWiki articles * `mwlib `_ mediawiki parser and utility library * `The Translate Toolkit `_ for localization and conversion * `execnet `_ rapid multi-Python deployment * `pylib `_ cross-platform path, IO, dynamic code library * `Pacha `_ configuration management in five minutes * `bbfreeze `_ create standalone executables from Python scripts * `pdb++ `_ a fancier version of PDB * `py-s3fuse `_ Amazon S3 FUSE based filesystem * `waskr `_ WSGI Stats Middleware * `guachi `_ global persistent configs for Python modules * `Circuits `_ lightweight Event Driven Framework * `pygtk-helpers `_ easy interaction with PyGTK * `QuantumCore `_ statusmessage and repoze openid plugin * `pydataportability `_ libraries for managing the open web * `XIST `_ extensible HTML/XML generator * `tiddlyweb `_ optionally headless, extensible RESTful datastore * `fancycompleter `_ for colorful tab-completion * `Paludis `_ tools for Gentoo Paludis package manager * `Gerald `_ schema comparison tool * `abjad `_ Python API for Formalized Score control * `bu `_ a microscopic build system * `katcp `_ Telescope communication protocol over Twisted * `kss plugin timer `_ * `pyudev `_ a pure Python binding to the Linux library libudev * `pytest-localserver `_ a plugin for pytest that provides a httpserver and smtpserver * `pytest-monkeyplus `_ a plugin that extends monkeypatch 
These projects help integrate py.test into other Python frameworks: * `pytest-django `_ for Django * `zope.pytest `_ for Zope and Grok * `pytest_gae `_ for Google App Engine * There is `some work `_ underway for Kotti, a CMS built in Pyramid/Pylons Some organisations using py.test ----------------------------------- * `Square Kilometre Array, Cape Town `_ * `Some Mozilla QA people `_ use pytest to distribute their Selenium tests * `Tandberg `_ * `Shootq `_ * `Stups department of Heinrich Heine University Duesseldorf `_ * `cellzome `_ * `Open End, Gothenborg `_ * `Laboraratory of Bioinformatics, Warsaw `_ * `merlinux, Germany `_ * many more ... (please be so kind to send a note via :ref:`contact`) pytest-2.5.1/doc/en/faq.txt0000664000175000017500000001626212254002202015112 0ustar hpkhpk00000000000000Some Issues and Questions ================================== .. note:: This FAQ is here only mostly for historic reasons. Checkout `pytest Q&A at Stackoverflow `_ for many questions and answers related to pytest and/or use :ref:`contact channels` to get help. On naming, nosetests, licensing and magic ------------------------------------------------ How does py.test relate to nose and unittest? +++++++++++++++++++++++++++++++++++++++++++++++++ py.test and nose_ share basic philosophy when it comes to running and writing Python tests. In fact, you can run many tests written for nose with py.test. nose_ was originally created as a clone of ``py.test`` when py.test was in the ``0.8`` release cycle. Note that starting with pytest-2.0 support for running unittest test suites is majorly improved. how does py.test relate to twisted's trial? ++++++++++++++++++++++++++++++++++++++++++++++ Since some time py.test has builtin support for supporting tests written using trial. It does not itself start a reactor, however, and does not handle Deferreds returned from a test in pytest style. 
If you are using trial's unittest.TestCase chances are that you can just run your tests even if you return Deferreds. In addition, there also is a dedicated `pytest-twisted `_ plugin which allows to return deferreds from pytest-style tests, allowing to use :ref:`fixtures` and other features. how does py.test work with Django? ++++++++++++++++++++++++++++++++++++++++++++++ In 2012, some work is going into the `pytest-django plugin `_. It substitutes the usage of Django's ``manage.py test`` and allows to use all pytest features_ most of which are not available from Django directly. .. _features: features.html What's this "magic" with py.test? (historic notes) ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Around 2007 (version ``0.8``) some people thought that py.test was using too much "magic". It had been part of the `pylib`_ which contains a lot of unreleated python library code. Around 2010 there was a major cleanup refactoring, which removed unused or deprecated code and resulted in the new ``pytest`` PyPI package which strictly contains only test-related code. This relese also brought a complete pluginification such that the core is around 300 lines of code and everything else is implemented in plugins. Thus ``pytest`` today is a small, universally runnable and customizable testing framework for Python. Note, however, that ``pytest`` uses metaprogramming techniques and reading its source is thus likely not something for Python beginners. A second "magic" issue was the assert statement debugging feature. Nowadays, py.test explicitely rewrites assert statements in test modules in order to provide more useful :ref:`assert feedback `. This completely avoids previous issues of confusing assertion-reporting. It also means, that you can use Python's ``-O`` optimization without loosing assertions in test modules. 
py.test contains a second mostly obsolete assert debugging technique, invoked via ``--assert=reinterpret``, activated by default on Python-2.5: When an ``assert`` statement fails, py.test re-interprets the expression part to show intermediate values. This technique suffers from a caveat that the rewriting does not: If your expression has side effects (better to avoid them anyway!) the intermediate values may not be the same, confusing the reinterpreter and obfuscating the initial error (this is also explained at the command line if it happens). You can also turn off all assertion interaction using the ``--assertmode=off`` option. .. _`py namespaces`: index.html .. _`py/__init__.py`: http://bitbucket.org/hpk42/py-trunk/src/trunk/py/__init__.py Why a ``py.test`` instead of a ``pytest`` command? ++++++++++++++++++++++++++++++++++++++++++++++++++ Some of the reasons are historic, others are practical. ``py.test`` used to be part of the ``py`` package which provided several developer utilities, all starting with ``py.``, thus providing nice TAB-completion. If you install ``pip install pycmd`` you get these tools from a separate package. These days the command line tool could be called ``pytest`` but since many people have gotten used to the old name and there is another tool named "pytest" we just decided to stick with ``py.test`` for now. pytest fixtures, parametrized tests ------------------------------------------------------- .. _funcargs: funcargs.html Is using pytest fixtures versus xUnit setup a style question? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ For simple applications and for people experienced with nose_ or unittest-style test setup using `xUnit style setup`_ probably feels natural. For larger test suites, parametrized testing or setup of complex test resources using funcargs_ may feel more natural. Moreover, funcargs are ideal for writing advanced test support code (like e.g. 
the monkeypatch_, the tmpdir_ or capture_ funcargs) because the support code can register setup/teardown functions in a managed class/module/function scope. .. _monkeypatch: monkeypatch.html .. _tmpdir: tmpdir.html .. _capture: capture.html .. _`why pytest_pyfuncarg__ methods?`: .. _`Convention over Configuration`: http://en.wikipedia.org/wiki/Convention_over_Configuration Can I yield multiple values from a fixture function function? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ There are two conceptual reasons why yielding from a factory function is not possible: * If multiple factories yielded values there would be no natural place to determine the combination policy - in real-world examples some combinations often should not run. * Calling factories for obtaining test function arguments is part of setting up and running a test. At that point it is not possible to add new test calls to the test collection anymore. However, with pytest-2.3 you can use the :ref:`@pytest.fixture` decorator and specify ``params`` so that all tests depending on the factory-created resource will run multiple times with different parameters. You can also use the `pytest_generate_tests`_ hook to implement the `parametrization scheme of your choice`_. .. _`pytest_generate_tests`: test/funcargs.html#parametrizing-tests .. _`parametrization scheme of your choice`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/ py.test interaction with other packages --------------------------------------------------- Issues with py.test, multiprocess and setuptools? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ On windows the multiprocess package will instantiate sub processes by pickling and thus implicitly re-import a lot of local modules. Unfortunately, setuptools-0.6.11 does not ``if __name__=='__main__'`` protect its generated command line script. This leads to infinite recursion when running a test that instantiates Processes. 
As of middle 2013, there shouldn't be a problem anymore when you use the standard setuptools (note that distribute has been merged back into setuptools which is now shipped directly with virtualenv). .. include:: links.inc pytest-2.5.1/doc/en/contents.txt0000664000175000017500000000062112254002202016170 0ustar hpkhpk00000000000000 .. _toc: Full pytest documentation =========================== `Download latest version as PDF `_ .. `Download latest version as EPUB `_ .. toctree:: :maxdepth: 2 overview apiref plugins example/index talks develop funcarg_compare.txt announce/index .. toctree:: :hidden: changelog.txt pytest-2.5.1/doc/en/plugins_index/0000775000175000017500000000000012254002202016443 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/plugins_index/plugins_index.txt0000664000175000017500000007057312254002202022070 0ustar hpkhpk00000000000000.. _plugins_index: List of Third-Party Plugins =========================== ========================================================================================== ==================================================================================== ========= ====================================================================================================== ====================================================================================================== ============================================================================================================================================= Name Author Downloads Python 2.7 Python 3.3 Summary ========================================================================================== ==================================================================================== ========= ====================================================================================================== ====================================================================================================== 
============================================================================================================================================= `pytest-bdd-0.6.7 `_ `Oleg Pidsadnyi `_ 1640 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-bdd-0.6.7?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-bdd-0.6.7?py=py33&pytest=2.5.0 BDD for pytest `pytest-bdd-splinter-0.5.96 `_ `Oleg Pidsadnyi `_ 3463 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-bdd-splinter-0.5.96?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-bdd-splinter-0.5.96?py=py33&pytest=2.5.0 Splinter subplugin for Pytest BDD plugin `pytest-bench-0.2.5 `_ `Concordus Applications `_ 1588 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-bench-0.2.5?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-bench-0.2.5?py=py33&pytest=2.5.0 Benchmark utility that plugs into pytest. `pytest-blockage-0.1 `_ `UNKNOWN `_ 110 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-blockage-0.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-blockage-0.1?py=py33&pytest=2.5.0 Disable network requests during a test run. `pytest-browsermob-proxy-0.1 `_ `Dave Hunt `_ 61 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-browsermob-proxy-0.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-browsermob-proxy-0.1?py=py33&pytest=2.5.0 BrowserMob proxy plugin for py.test. `pytest-bugzilla-0.2 `_ `Noufal Ibrahim `_ 105 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-bugzilla-0.2?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-bugzilla-0.2?py=py33&pytest=2.5.0 py.test bugzilla integration plugin `pytest-cache-1.0 `_ `Holger Krekel `_ 5690 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-cache-1.0?py=py27&pytest=2.5.0 .. 
image:: http://pytest-plugs.herokuapp.com/status/pytest-cache-1.0?py=py33&pytest=2.5.0 pytest plugin with mechanisms for caching across test runs `pytest-capturelog-0.7 `_ `Meme Dough `_ 1615 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-capturelog-0.7?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-capturelog-0.7?py=py33&pytest=2.5.0 py.test plugin to capture log messages `pytest-codecheckers-0.2 `_ `Ronny Pfannschmidt `_ 408 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-codecheckers-0.2?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-codecheckers-0.2?py=py33&pytest=2.5.0 pytest plugin to add source code sanity checks (pep8 and friends) `pytest-contextfixture-0.1.1 `_ `Andreas Pelme `_ 101 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-contextfixture-0.1.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-contextfixture-0.1.1?py=py33&pytest=2.5.0 Define pytest fixtures as context managers. `pytest-couchdbkit-0.5.1 `_ `RonnyPfannschmidt `_ 215 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-couchdbkit-0.5.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-couchdbkit-0.5.1?py=py33&pytest=2.5.0 py.test extension for per-test couchdb databases using couchdbkit `pytest-cov-1.6 `_ `Meme Dough `_ 23787 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-cov-1.6?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-cov-1.6?py=py33&pytest=2.5.0 py.test plugin for coverage reporting with support for both centralised and distributed testing, including subprocesses and multiprocessing `pytest-dbfixtures-0.4.0 `_ `Clearcode - The A Room `_ 6332 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-dbfixtures-0.4.0?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-dbfixtures-0.4.0?py=py33&pytest=2.5.0 dbfixtures plugin for py.test. 
`pytest-django-2.4 `_ `Andreas Pelme `_ 4935 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-django-2.4?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-django-2.4?py=py33&pytest=2.5.0 A Django plugin for py.test. `pytest-django-lite-0.1.0 `_ `David Cramer `_ 1075 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-django-lite-0.1.0?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-django-lite-0.1.0?py=py33&pytest=2.5.0 The bare minimum to integrate py.test with Django. `pytest-figleaf-1.0 `_ `holger krekel `_ 59 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-figleaf-1.0?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-figleaf-1.0?py=py33&pytest=2.5.0 py.test figleaf coverage plugin `pytest-flakes-0.2 `_ `Florian Schulze, Holger Krekel and Ronny Pfannschmidt `_ 1203 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-flakes-0.2?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-flakes-0.2?py=py33&pytest=2.5.0 pytest plugin to check source code with pyflakes `pytest-greendots-0.2 `_ `UNKNOWN `_ 149 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-greendots-0.2?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-greendots-0.2?py=py33&pytest=2.5.0 Green progress dots `pytest-growl-0.1 `_ `Anthony Long `_ 65 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-growl-0.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-growl-0.1?py=py33&pytest=2.5.0 Growl notifications for pytest results. `pytest-incremental-0.3.0 `_ `Eduardo Naufel Schettino `_ 192 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-incremental-0.3.0?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-incremental-0.3.0?py=py33&pytest=2.5.0 an incremental test runner (pytest plugin) `pytest-instafail-0.1.1 `_ `Janne Vanhala `_ 431 .. 
image:: http://pytest-plugs.herokuapp.com/status/pytest-instafail-0.1.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-instafail-0.1.1?py=py33&pytest=2.5.0 py.test plugin to show failures instantly `pytest-ipdb-0.1-prerelease `_ `Matthew de Verteuil `_ 99 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-ipdb-0.1-prerelease?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-ipdb-0.1-prerelease?py=py33&pytest=2.5.0 A py.test plug-in to enable drop to ipdb debugger on test failure. `pytest-jira-0.01 `_ `James Laska `_ 94 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-jira-0.01?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-jira-0.01?py=py33&pytest=2.5.0 py.test JIRA integration plugin, using markers `pytest-konira-0.2 `_ `Alfredo Deza `_ 99 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-konira-0.2?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-konira-0.2?py=py33&pytest=2.5.0 Run Konira DSL tests with py.test `pytest-localserver-0.3.2 `_ `Sebastian Rahlf `_ 470 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-localserver-0.3.2?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-localserver-0.3.2?py=py33&pytest=2.5.0 py.test plugin to test server connections locally. `pytest-marker-bugzilla-0.06 `_ `Eric Sammons `_ 205 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-marker-bugzilla-0.06?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-marker-bugzilla-0.06?py=py33&pytest=2.5.0 py.test bugzilla integration plugin, using markers `pytest-markfiltration-0.8 `_ `adam goucher `_ 269 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-markfiltration-0.8?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-markfiltration-0.8?py=py33&pytest=2.5.0 UNKNOWN `pytest-marks-0.4 `_ `adam goucher `_ 241 .. 
image:: http://pytest-plugs.herokuapp.com/status/pytest-marks-0.4?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-marks-0.4?py=py33&pytest=2.5.0 UNKNOWN `pytest-monkeyplus-1.1.0 `_ `Virgil Dupras `_ 132 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-monkeyplus-1.1.0?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-monkeyplus-1.1.0?py=py33&pytest=2.5.0 pytest's monkeypatch subclass with extra functionalities `pytest-mozwebqa-1.1.1 `_ `Dave Hunt `_ 1087 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-mozwebqa-1.1.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-mozwebqa-1.1.1?py=py33&pytest=2.5.0 Mozilla WebQA plugin for py.test. `pytest-oerp-0.2.0 `_ `Leonardo Santagada `_ 158 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-oerp-0.2.0?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-oerp-0.2.0?py=py33&pytest=2.5.0 pytest plugin to test OpenERP modules `pytest-osxnotify-0.1.4 `_ `Daniel Bader `_ 200 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-osxnotify-0.1.4?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-osxnotify-0.1.4?py=py33&pytest=2.5.0 OS X notifications for py.test results. `pytest-paste-config-0.1 `_ `UNKNOWN `_ 169 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-paste-config-0.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-paste-config-0.1?py=py33&pytest=2.5.0 Allow setting the path to a paste config file `pytest-pep8-1.0.5 `_ `Holger Krekel and Ronny Pfannschmidt `_ 5971 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-pep8-1.0.5?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-pep8-1.0.5?py=py33&pytest=2.5.0 pytest plugin to check PEP8 requirements `pytest-poo-0.2 `_ `Andreas Pelme `_ 116 .. 
image:: http://pytest-plugs.herokuapp.com/status/pytest-poo-0.2?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-poo-0.2?py=py33&pytest=2.5.0 Visualize your crappy tests `pytest-pydev-0.1 `_ `Sebastian Rahlf `_ 107 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-pydev-0.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-pydev-0.1?py=py33&pytest=2.5.0 py.test plugin to connect to a remote debug server with PyDev or PyCharm. `pytest-qt-1.0.2 `_ `Bruno Oliveira `_ 140 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-qt-1.0.2?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-qt-1.0.2?py=py33&pytest=2.5.0 pytest plugin that adds fixtures for testing Qt (PyQt and PySide) applications. `pytest-quickcheck-0.8 `_ `Tetsuya Morimoto `_ 380 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-quickcheck-0.8?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-quickcheck-0.8?py=py33&pytest=2.5.0 pytest plugin to generate random data inspired by QuickCheck `pytest-rage-0.1 `_ `Leonardo Santagada `_ 64 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-rage-0.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-rage-0.1?py=py33&pytest=2.5.0 pytest plugin to implement PEP712 `pytest-random-0.02 `_ `Leah Klearman `_ 125 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-random-0.02?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-random-0.02?py=py33&pytest=2.5.0 py.test plugin to randomize tests `pytest-rerunfailures-0.03 `_ `Leah Klearman `_ 153 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-rerunfailures-0.03?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-rerunfailures-0.03?py=py33&pytest=2.5.0 py.test plugin to re-run tests to eliminate flakey failures `pytest-runfailed-0.3 `_ `Dimitri Merejkowsky `_ 96 .. 
image:: http://pytest-plugs.herokuapp.com/status/pytest-runfailed-0.3?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-runfailed-0.3?py=py33&pytest=2.5.0 implement a --failed option for pytest `pytest-runner-2.0 `_ `Jason R. Coombs `_ 5726 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-runner-2.0?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-runner-2.0?py=py33&pytest=2.5.0 UNKNOWN `pytest-sugar-0.2.2 `_ `Teemu, Janne Vanhala `_ 374 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-sugar-0.2.2?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-sugar-0.2.2?py=py33&pytest=2.5.0 py.test plugin that adds instafail, ETA and neat graphics `pytest-timeout-0.3 `_ `Floris Bruynooghe `_ 4514 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-timeout-0.3?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-timeout-0.3?py=py33&pytest=2.5.0 pytest plugin to abort tests after a timeout `pytest-twisted-1.4 `_ `Ralf Schmitt `_ 257 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-twisted-1.4?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-twisted-1.4?py=py33&pytest=2.5.0 A twisted plugin for py.test. `pytest-xdist-1.9 `_ `holger krekel and contributors `_ 8103 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-xdist-1.9?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-xdist-1.9?py=py33&pytest=2.5.0 py.test xdist plugin for distributed testing and loop-on-failing modes `pytest-xprocess-0.8 `_ `Holger Krekel `_ 108 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-xprocess-0.8?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-xprocess-0.8?py=py33&pytest=2.5.0 pytest plugin to manage external processes across test runs `pytest-yamlwsgi-0.6 `_ `Ali Afshar `_ 210 .. 
image:: http://pytest-plugs.herokuapp.com/status/pytest-yamlwsgi-0.6?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-yamlwsgi-0.6?py=py33&pytest=2.5.0 Run tests against wsgi apps defined in yaml `pytest-zap-0.1 `_ `Dave Hunt `_ 69 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-zap-0.1?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-zap-0.1?py=py33&pytest=2.5.0 OWASP ZAP plugin for py.test. ========================================================================================== ==================================================================================== ========= ====================================================================================================== ====================================================================================================== ============================================================================================================================================= *(Downloads are given from last month only)* *(Updated on 2013-12-12)* pytest-2.5.1/doc/en/plugins_index/plugins_index.py0000664000175000017500000002000312254002202021660 0ustar hpkhpk00000000000000''' Script to generate the file `plugins_index.txt` with information about pytest plugins taken directly from a live PyPI server. This will evolve to include test compatibility (pythons and pytest versions) information also. ''' from collections import namedtuple import datetime from distutils.version import LooseVersion import itertools from optparse import OptionParser import os import sys import xmlrpclib import pytest #=================================================================================================== # iter_plugins #=================================================================================================== def iter_plugins(client, search='pytest-'): ''' Returns an iterator of (name, version) from PyPI. 
#===================================================================================================
# get_latest_versions
#===================================================================================================
def get_latest_versions(plugins):
    '''
    Returns an iterator of (name, version) from the given iterable of (name, version),
    yielding only the latest version of each package.

    Versions are compared using distutils's LooseVersion.

    .. note:: the input must be sorted by package name (itertools.groupby only
        groups consecutive equal keys); within one name group the versions may
        appear in any order.

    :param plugins: iterable of (name, version) string pairs.
    '''
    plugins = [(name, LooseVersion(version)) for (name, version) in plugins]
    for name, grouped_plugins in itertools.groupby(plugins, key=lambda x: x[0]):
        # Pick the highest version explicitly. The previous code took the
        # *last* element of the group, which is only correct when the versions
        # inside a group happen to arrive in ascending order.
        name, loose_version = max(grouped_plugins, key=lambda pair: pair[1])
        yield name, str(loose_version)
image:: http://pytest-plugs.herokuapp.com/status/{name}-{version}'.format(name=package_name, version=version) image_url += '?py={py}&pytest={pytest}' row = ( ColumnData(package_name + '-' + version, release_data['release_url']), ColumnData(release_data['author'], release_data['author_email']), ColumnData(str(download_count), None), ColumnData(image_url.format(py='py27', pytest=pytest_version), None), ColumnData(image_url.format(py='py33', pytest=pytest_version), None), ColumnData(release_data['summary'], None), ) assert len(row) == len(headers) rows.append(row) print 'OK (%d%%)' % ((index + 1) * 100 / len(plugins)) return headers, rows #=================================================================================================== # generate_plugins_index_from_table #=================================================================================================== def generate_plugins_index_from_table(filename, headers, rows): ''' Generates a RST file with the table data given. :param filename: output filename :param headers: see `obtain_plugins_table` :param rows: see `obtain_plugins_table` ''' # creates a list of rows, each being a str containing appropriate column text and link table_texts = [] for row in rows: column_texts = [] for i, col_data in enumerate(row): text = '`%s <%s>`_' % (col_data.text, col_data.link) if col_data.link else col_data.text column_texts.append(text) table_texts.append(column_texts) # compute max length of each column so we can build the rst table column_lengths = [len(x) for x in headers] for column_texts in table_texts: for i, row_text in enumerate(column_texts): column_lengths[i] = max(column_lengths[i], len(row_text) + 2) def get_row_limiter(char): return ' '.join(char * length for length in column_lengths) with file(filename, 'w') as f: # write welcome print >> f, '.. 
_plugins_index:' print >> f print >> f, 'List of Third-Party Plugins' print >> f, '===========================' print >> f # table print >> f, get_row_limiter('=') for i, header in enumerate(headers): print >> f, '{:^{fill}}'.format(header, fill=column_lengths[i]), print >> f print >> f, get_row_limiter('=') for column_texts in table_texts: for i, row_text in enumerate(column_texts): print >> f, '{:^{fill}}'.format(row_text, fill=column_lengths[i]), print >> f print >> f print >> f, get_row_limiter('=') print >> f print >> f, '*(Downloads are given from last month only)*' print >> f print >> f, '*(Updated on %s)*' % _get_today_as_str() #=================================================================================================== # _get_today_as_str #=================================================================================================== def _get_today_as_str(): ''' internal. only exists so we can patch it in testing. ''' return datetime.date.today().strftime('%Y-%m-%d') #=================================================================================================== # generate_plugins_index #=================================================================================================== def generate_plugins_index(client, filename): ''' Generates an RST file with a table of the latest pytest plugins found in PyPI. 
:param client: xmlrpclib.ServerProxy :param filename: output filename ''' plugins = get_latest_versions(iter_plugins(client)) headers, rows = obtain_plugins_table(plugins, client) generate_plugins_index_from_table(filename, headers, rows) #=================================================================================================== # main #=================================================================================================== def main(argv): filename = os.path.join(os.path.dirname(__file__), 'plugins_index.txt') url = 'http://pypi.python.org/pypi' parser = OptionParser(description='Generates a restructured document of pytest plugins from PyPI') parser.add_option('-f', '--filename', default=filename, help='output filename [default: %default]') parser.add_option('-u', '--url', default=url, help='url of PyPI server to obtain data from [default: %default]') (options, _) = parser.parse_args(argv[1:]) client = xmlrpclib.ServerProxy(options.url) generate_plugins_index(client, options.filename) print print '%s Updated.' % options.filename return 0 #=================================================================================================== # main #=================================================================================================== if __name__ == '__main__': sys.exit(main(sys.argv)) pytest-2.5.1/doc/en/plugins_index/test_plugins_index.expected.txt0000664000175000017500000000360512254002202024717 0ustar hpkhpk00000000000000.. 
_plugins_index: List of Third-Party Plugins =========================== ============================================ ============================= ========= ============================================================================================= ============================================================================================= =================== Name Author Downloads Python 2.7 Python 3.3 Summary ============================================ ============================= ========= ============================================================================================= ============================================================================================= =================== `pytest-plugin1-1.0 `_ `someone `_ 4 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-plugin1-1.0?py=py27&pytest=2.5.0 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-plugin1-1.0?py=py33&pytest=2.5.0 some plugin `pytest-plugin2-1.2 `_ `other `_ 40 .. image:: http://pytest-plugs.herokuapp.com/status/pytest-plugin2-1.2?py=py27&pytest=2.5.0 .. 
import os
import xmlrpclib

import pytest


#===================================================================================================
# test_plugins_index
#===================================================================================================
@pytest.mark.xfail(reason="issue405 fails, not py33 ready, not a core pytest test")
def test_plugins_index(tmpdir, monkeypatch):
    '''
    Blackbox testing for plugins_index script. Calls main() generating a file
    and compares produced output to expected.

    .. note:: if the test fails, a file named `test_plugins_index.obtained`
        will be generated in the same directory as this test file. Ensure the
        contents are correct and overwrite the global `expected_output` with
        the new contents.
    '''
    import plugins_index

    # dummy interface to xmlrpclib.ServerProxy returning canned PyPI data
    class DummyProxy(object):
        expected_url = 'http://dummy.pypi'

        def __init__(self, url):
            assert url == self.expected_url

        def search(self, query):
            assert query == {'name' : 'pytest-'}
            return [
                {'name': 'pytest-plugin1', 'version' : '0.8'},
                {'name': 'pytest-plugin1', 'version' : '1.0'},
                {'name': 'pytest-plugin2', 'version' : '1.2'},
            ]

        def release_data(self, package_name, version):
            results = {
                ('pytest-plugin1', '1.0') : {
                    'package_url' : 'http://plugin1',
                    'release_url' : 'http://plugin1/1.0',
                    'author' : 'someone',
                    'author_email' : 'someone@py.com',
                    'summary' : 'some plugin',
                    'downloads': {'last_day': 1, 'last_month': 4, 'last_week': 2},
                },
                ('pytest-plugin2', '1.2') : {
                    'package_url' : 'http://plugin2',
                    'release_url' : 'http://plugin2/1.2',
                    'author' : 'other',
                    'author_email' : 'other@py.com',
                    'summary' : 'some other plugin',
                    'downloads': {'last_day': 10, 'last_month': 40, 'last_week': 20},
                },
            }
            return results[(package_name, version)]

    monkeypatch.setattr(xmlrpclib, 'ServerProxy', DummyProxy, 'foo')
    monkeypatch.setattr(plugins_index, '_get_today_as_str', lambda: '2013-10-20')

    output_file = str(tmpdir.join('output.txt'))
    assert plugins_index.main(['', '-f', output_file, '-u', DummyProxy.expected_url]) == 0

    # use `open` instead of the py2-only `file` builtin, in a context manager
    # so the handle is closed deterministically
    with open(output_file, 'rU') as f:
        obtained_output = f.read()

    expected_output = get_expected_output()
    if obtained_output != expected_output:
        # leave the obtained output on disk to ease updating the expectation
        obtained_file = os.path.splitext(__file__)[0] + '.obtained.txt'
        with open(obtained_file, 'w') as f:
            f.write(obtained_output)
    assert obtained_output == expected_output


def get_expected_output():
    """
    :return: string with expected rst output from the plugins_index.py script,
        with the ``pytest=2.X.Y`` placeholder replaced by the currently
        running pytest version.
    """
    expected_filename = os.path.join(os.path.dirname(__file__),
                                     'test_plugins_index.expected.txt')
    # read via a context manager: the original `open(...).read()` left the
    # file handle to be closed by the garbage collector (resource leak)
    with open(expected_filename, 'rU') as f:
        expected_output = f.read()
    return expected_output.replace('pytest=2.X.Y',
                                   'pytest={}'.format(pytest.__version__))


#===================================================================================================
# main
#===================================================================================================
if __name__ == '__main__':
    pytest.main()
The ``monkeypatch`` function argument helps you to safely set/delete an attribute, dictionary item or environment variable or to modify ``sys.path`` for importing. See the `monkeypatch blog post`_ for some introduction material and a discussion of its motivation. .. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/ Simple example: monkeypatching functions --------------------------------------------------- If you want to pretend that ``os.expanduser`` returns a certain directory, you can use the :py:meth:`monkeypatch.setattr` method to patch this function before calling into a function which uses it:: # content of test_module.py import os.path def getssh(): # pseudo application code return os.path.join(os.path.expanduser("~admin"), '.ssh') def test_mytest(monkeypatch): def mockreturn(path): return '/abc' monkeypatch.setattr(os.path, 'expanduser', mockreturn) x = getssh() assert x == '/abc/.ssh' Here our test function monkeypatches ``os.path.expanduser`` and then calls into an function that calls it. After the test function finishes the ``os.path.expanduser`` modification will be undone. example: preventing "requests" from remote operations ------------------------------------------------------ If you want to prevent the "requests" library from performing http requests in all your tests, you can do:: # content of conftest.py import pytest @pytest.fixture(autouse=True) def no_requests(monkeypatch): monkeypatch.delattr("requests.session.Session.request") This autouse fixture will be executed for each test function and it will delete the method ``request.session.Session.request`` so that any attempts within tests to create http requests will fail. 
example: setting an attribute on some class ------------------------------------------------------ If you need to patch out ``os.getcwd()`` to return an artifical value:: def test_some_interaction(monkeypatch): monkeypatch.setattr("os.getcwd", lambda: "/") which is equivalent to the long form:: def test_some_interaction(monkeypatch): import os monkeypatch.setattr(os, "getcwd", lambda: "/") Method reference of the monkeypatch function argument ----------------------------------------------------- .. autoclass:: monkeypatch :members: setattr, replace, delattr, setitem, delitem, setenv, delenv, syspath_prepend, chdir, undo ``monkeypatch.setattr/delattr/delitem/delenv()`` all by default raise an Exception if the target does not exist. Pass ``raising=False`` if you want to skip this check. pytest-2.5.1/doc/en/bash-completion.txt0000664000175000017500000000130212254002202017414 0ustar hpkhpk00000000000000 .. _bash_completion: Setting up bash completion ========================== When using bash as your shell, ``py.test`` can use argcomplete (https://argcomplete.readthedocs.org/) for auto-completion. For this ``argcomplete`` needs to be installed **and** enabled. Install argcomplete using:: sudo pip install 'argcomplete>=0.5.7' For global activation of all argcomplete enabled python applications run:: sudo activate-global-python-argcomplete For permanent (but not global) ``py.test`` activation, use:: register-python-argcomplete py.test >> ~/.bashrc For one-time activation of argcomplete for ``py.test`` only, use:: eval "$(register-python-argcomplete py.test)" pytest-2.5.1/doc/en/conftest.py0000664000175000017500000000003512254002202015770 0ustar hpkhpk00000000000000collect_ignore = ["conf.py"] pytest-2.5.1/doc/en/feedback.rst0000664000175000017500000000031512254002202016050 0ustar hpkhpk00000000000000 What users say: `py.test is pretty much the best thing ever`_ (Alex Gaynor) .. 
_`py.test is pretty much the best thing ever`_ (Alex Gaynor) http://twitter.com/#!/alex_gaynor/status/22389410366 pytest-2.5.1/doc/en/apiref.txt0000664000175000017500000000061112254002202015600 0ustar hpkhpk00000000000000 .. _apiref: py.test reference documentation ================================================ .. toctree:: :maxdepth: 2 builtin.txt customize.txt assert.txt fixture.txt yieldfixture.txt parametrize.txt xunit_setup.txt capture.txt monkeypatch.txt xdist.txt tmpdir.txt mark.txt skipping.txt recwarn.txt unittest.txt nose.txt doctest.txt pytest-2.5.1/doc/en/genapi.py0000664000175000017500000000215312254002202015411 0ustar hpkhpk00000000000000import textwrap import inspect class Writer: def __init__(self, clsname): self.clsname = clsname def __enter__(self): self.file = open("%s.api" % self.clsname, "w") return self def __exit__(self, *args): self.file.close() print "wrote", self.file.name def line(self, line): self.file.write(line+"\n") def docmethod(self, method): doc = " ".join(method.__doc__.split()) indent = " " w = textwrap.TextWrapper(initial_indent=indent, subsequent_indent=indent) spec = inspect.getargspec(method) del spec.args[0] self.line(".. py:method:: " + method.__name__ + inspect.formatargspec(*spec)) self.line("") self.line(w.fill(doc)) self.line("") def pytest_funcarg__a(request): with Writer("request") as writer: writer.docmethod(request.getfuncargvalue) writer.docmethod(request.cached_setup) writer.docmethod(request.addfinalizer) writer.docmethod(request.applymarker) def test_hello(a): pass pytest-2.5.1/doc/en/pytest.ini0000664000175000017500000000011212254002202015616 0ustar hpkhpk00000000000000[pytest] # just defined to prevent the root level tox.ini from kicking in pytest-2.5.1/doc/en/capture.txt0000664000175000017500000000757112254002202016011 0ustar hpkhpk00000000000000 .. 
_`captures`: Capturing of the stdout/stderr output ========================================================= Default stdout/stderr/stdin capturing behaviour --------------------------------------------------------- During test execution any output sent to ``stdout`` and ``stderr`` is captured. If a test or a setup method fails its according captured output will usually be shown along with the failure traceback. In addition, ``stdin`` is set to a "null" object which will fail on attempts to read from it because it is rarely desired to wait for interactive input when running automated tests. By default capturing is done by intercepting writes to low level file descriptors. This allows to capture output from simple print statements as well as output from a subprocess started by a test. Setting capturing methods or disabling capturing ------------------------------------------------- There are two ways in which ``py.test`` can perform capturing: * file descriptor (FD) level capturing (default): All writes going to the operating system file descriptors 1 and 2 will be captured. * ``sys`` level capturing: Only writes to Python files ``sys.stdout`` and ``sys.stderr`` will be captured. No capturing of writes to filedescriptors is performed. .. _`disable capturing`: You can influence output capturing mechanisms from the command line:: py.test -s # disable all capturing py.test --capture=sys # replace sys.stdout/stderr with in-mem files py.test --capture=fd # also point filedescriptors 1 and 2 to temp file .. 
_printdebugging: Using print statements for debugging --------------------------------------------------- One primary benefit of the default capturing of stdout/stderr output is that you can use print statements for debugging:: # content of test_module.py def setup_function(function): print ("setting up %s" % function) def test_func1(): assert True def test_func2(): assert False and running this module will show you precisely the output of the failing function and hide the other one:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 2 items test_module.py .F ================================= FAILURES ================================= ________________________________ test_func2 ________________________________ def test_func2(): > assert False E assert False test_module.py:9: AssertionError ----------------------------- Captured stdout ------------------------------ setting up ==================== 1 failed, 1 passed in 0.01 seconds ==================== Accessing captured output from a test function --------------------------------------------------- The :ref:`funcarg mechanism` allows test function a very easy way to access the captured output by simply using the names ``capsys`` or ``capfd`` in the test function signature. Here is an example test function that performs some output related checks:: def test_myoutput(capsys): # or use "capfd" for fd-level print ("hello") sys.stderr.write("world\n") out, err = capsys.readouterr() assert out == "hello\n" assert err == "world\n" print "next" out, err = capsys.readouterr() assert out == "next\n" The ``readouterr()`` call snapshots the output so far - and capturing will be continued. After the test function finishes the original streams will be restored. Using ``capsys`` this way frees your test from having to care about setting/resetting output streams and also interacts well with py.test's own per-test capturing. 
If you want to capture on ``fd`` level you can use the ``capfd`` function argument which offers the exact same interface. .. include:: links.inc pytest-2.5.1/doc/en/doctest.txt0000664000175000017500000000303512254002202016002 0ustar hpkhpk00000000000000 Doctest integration for modules and test files ========================================================= By default all files matching the ``test*.txt`` pattern will be run through the python standard ``doctest`` module. You can change the pattern by issuing:: py.test --doctest-glob='*.rst' on the command line. You can also trigger running of doctests from docstrings in all python modules (including regular python test modules):: py.test --doctest-modules You can make these changes permanent in your project by putting them into a pytest.ini file like this:: # content of pytest.ini [pytest] addopts = --doctest-modules If you then have a text file like this:: # content of example.rst hello this is a doctest >>> x = 3 >>> x 3 and another like this:: # content of mymodule.py def something(): """ a doctest in a docstring >>> something() 42 """ return 42 then you can just invoke ``py.test`` without command line options:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 1 items mymodule.py . ========================= 1 passed in 0.01 seconds ========================= It is possible to use fixtures using the ``getfixture`` helper:: # content of example.rst >>> tmp = getfixture('tmpdir') >>> ... >>> Also, :ref:`usefixtures` and :ref:`autouse` fixtures are supported when executing text doctest files. 
pytest-2.5.1/doc/en/setup.txt0000664000175000017500000000046612254002202015502 0ustar hpkhpk00000000000000 setup: is now an "autouse fixture" ======================================================== During development prior to the pytest-2.3 release the name ``pytest.setup`` was used but before the release it was renamed and moved to become part of the general fixture mechanism, namely :ref:`autouse fixtures` pytest-2.5.1/doc/en/contact.txt0000664000175000017500000000305112254002202015766 0ustar hpkhpk00000000000000 .. _`contact channels`: .. _`contact`: Contact channels =================================== - `pytest issue tracker`_ to report bugs or suggest features (for version 2.0 and above). - `pytest on stackoverflow.com `_ to post questions with the tag ``pytest``. New Questions will usually be seen by pytest users or developers and answered quickly. - `Testing In Python`_: a mailing list for Python testing tools and discussion. - `pytest-dev at python.org (mailing list)`_ pytest specific announcements and discussions. - `pytest-commit at python.org (mailing list)`_: for commits and new issues - #pylib on irc.freenode.net IRC channel for random questions. - private mail to Holger.Krekel at gmail com if you want to communicate sensitive issues - `merlinux.eu`_ offers pytest and tox-related professional teaching and consulting. .. _`pytest issue tracker`: http://bitbucket.org/hpk42/pytest/issues/ .. _`old issue tracker`: http://bitbucket.org/hpk42/py-trunk/issues/ .. _`merlinux.eu`: http://merlinux.eu .. _`get an account`: .. _tetamap: http://tetamap.wordpress.com .. _`@pylibcommit`: http://twitter.com/pylibcommit .. _`Testing in Python`: http://lists.idyll.org/listinfo/testing-in-python .. _FOAF: http://en.wikipedia.org/wiki/FOAF .. _`py-dev`: .. _`development mailing list`: .. _`pytest-dev at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-dev .. _`py-svn`: .. 
_`pytest-commit at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-commit pytest-2.5.1/doc/en/fixture.txt0000664000175000017500000006225412254002202016033 0ustar hpkhpk00000000000000.. _fixture: .. _fixtures: .. _`fixture functions`: pytest fixtures: explicit, modular, scalable ======================================================== .. currentmodule:: _pytest.python .. versionadded:: 2.0/2.3/2.4 .. _`xUnit`: http://en.wikipedia.org/wiki/XUnit .. _`purpose of test fixtures`: http://en.wikipedia.org/wiki/Test_fixture#Software .. _`Dependency injection`: http://en.wikipedia.org/wiki/Dependency_injection#Definition The `purpose of test fixtures`_ is to provide a fixed baseline upon which tests can reliably and repeatedly execute. pytest fixtures offer dramatic improvements over the classic xUnit style of setup/teardown functions: * fixtures have explicit names and are activated by declaring their use from test functions, modules, classes or whole projects. * fixtures are implemented in a modular manner, as each fixture name triggers a *fixture function* which can itself use other fixtures. * fixture management scales from simple unit to complex functional testing, allowing to parametrize fixtures and tests according to configuration and component options, or to re-use fixtures across class, module or whole test session scopes. In addition, pytest continues to support :ref:`xunitsetup`. You can mix both styles, moving incrementally from classic to new style, as you prefer. You can also start out from existing :ref:`unittest.TestCase style ` or :ref:`nose based ` projects. .. note:: pytest-2.4 introduced an additional experimental :ref:`yield fixture mechanism ` for easier context manager integration and more linear writing of teardown code. .. _`funcargs`: .. _`funcarg mechanism`: .. _`fixture function`: .. _`@pytest.fixture`: .. 
_`pytest.fixture`: Fixtures as Function arguments ----------------------------------------- Test functions can receive fixture objects by naming them as an input argument. For each argument name, a fixture function with that name provides the fixture object. Fixture functions are registered by marking them with :py:func:`@pytest.fixture <_pytest.python.fixture>`. Let's look at a simple self-contained test module containing a fixture and a test function using it:: # content of ./test_smtpsimple.py import pytest @pytest.fixture def smtp(): import smtplib return smtplib.SMTP("merlinux.eu") def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 assert "merlinux" in msg assert 0 # for demo purposes Here, the ``test_ehlo`` needs the ``smtp`` fixture value. pytest will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>` marked ``smtp`` fixture function. Running the test looks like this:: $ py.test test_smtpsimple.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 1 items test_smtpsimple.py F ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ smtp = def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 assert "merlinux" in msg > assert 0 # for demo purposes E assert 0 test_smtpsimple.py:12: AssertionError ========================= 1 failed in 0.21 seconds ========================= In the failure traceback we see that the test function was called with a ``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture function. The test function fails on our deliberate ``assert 0``. Here is an exact protocol of how py.test comes to call the test function this way: 1. pytest :ref:`finds ` the ``test_ehlo`` because of the ``test_`` prefix. The test function needs a function argument named ``smtp``. 
A matching fixture function is discovered by looking for a fixture-marked function named ``smtp``. 2. ``smtp()`` is called to create an instance. 3. ``test_ehlo()`` is called and fails in the last line of the test function. Note that if you misspell a function argument or want to use one that isn't available, you'll see an error with a list of available function arguments. .. Note:: You can always issue:: py.test --fixtures test_simplefactory.py to see available fixtures. In versions prior to 2.3 there was no ``@pytest.fixture`` marker and you had to use a magic ``pytest_funcarg__NAME`` prefix for the fixture factory. This remains and will remain supported but is not anymore advertised as the primary means of declaring fixture functions. "Funcargs" a prime example of dependency injection --------------------------------------------------- When injecting fixtures to test functions, pytest-2.0 introduced the term "funcargs" or "funcarg mechanism" which continues to be present also in docs today. It now refers to the specific case of injecting fixture values as arguments to test functions. With pytest-2.3 there are more possibilities to use fixtures but "funcargs" remain as the main way as they allow to directly state the dependencies of a test function. As the following examples show in more detail, funcargs allow test functions to easily receive and work against specific pre-initialized application objects without having to care about import/setup/cleanup details. It's a prime example of `dependency injection`_ where fixture functions take the role of the *injector* and test functions are the *consumers* of fixture objects. .. _smtpshared: Sharing a fixture across tests in a module (or class/session) ----------------------------------------------------------------- .. regendoc:wipe Fixtures requiring network access depend on connectivity and are usually time-expensive to create. 
Extending the previous example, we can add a ``scope='module'`` parameter to the :py:func:`@pytest.fixture <_pytest.python.fixture>` invocation to cause the decorated ``smtp`` fixture function to only be invoked once per test module. Multiple test functions in a test module will thus each receive the same ``smtp`` fixture instance. The next example puts the fixture function into a separate ``conftest.py`` file so that tests from multiple test modules in the directory can access the fixture function:: # content of conftest.py import pytest import smtplib @pytest.fixture(scope="module") def smtp(): return smtplib.SMTP("merlinux.eu") The name of the fixture again is ``smtp`` and you can access its result by listing the name ``smtp`` as an input parameter in any test or fixture function (in or below the directory where ``conftest.py`` is located):: # content of test_module.py def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 assert "merlinux" in response[1] assert 0 # for demo purposes def test_noop(smtp): response = smtp.noop() assert response[0] == 250 assert 0 # for demo purposes We deliberately insert failing ``assert 0`` statements in order to inspect what is going on and can now run the tests:: $ py.test test_module.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 2 items test_module.py FF ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 assert "merlinux" in response[1] > assert 0 # for demo purposes E assert 0 test_module.py:6: AssertionError ________________________________ test_noop _________________________________ smtp = def test_noop(smtp): response = smtp.noop() assert response[0] == 250 > assert 0 # for demo purposes E assert 0 test_module.py:11: AssertionError 
========================= 2 failed in 0.17 seconds ========================= You see the two ``assert 0`` failing and more importantly you can also see that the same (module-scoped) ``smtp`` object was passed into the two test functions because pytest shows the incoming argument values in the traceback. As a result, the two test functions using ``smtp`` run as quick as a single one because they reuse the same instance. If you decide that you rather want to have a session-scoped ``smtp`` instance, you can simply declare it:: @pytest.fixture(scope="session") def smtp(...): # the returned fixture value will be shared for # all tests needing it .. _`finalization`: fixture finalization / executing teardown code ------------------------------------------------------------- pytest supports execution of fixture specific finalization code when the fixture goes out of scope. By accepting a ``request`` object into your fixture function you can call its ``request.addfinalizer`` one or multiple times:: # content of conftest.py import smtplib import pytest @pytest.fixture(scope="module") def smtp(request): smtp = smtplib.SMTP("merlinux.eu") def fin(): print ("teardown smtp") smtp.close() request.addfinalizer(fin) return smtp # provide the fixture value The ``fin`` function will execute when the last test using the fixture in the module has finished execution. Let's execute it:: $ py.test -s -q --tb=no FFteardown smtp 2 failed in 0.17 seconds We see that the ``smtp`` instance is finalized after the two tests finished execution. Note that if we decorated our fixture function with ``scope='function'`` then fixture setup and cleanup would occur around each single test. In either case the test module itself does not need to change or know about these details of fixture setup. .. 
_`request-context`: Fixtures can introspect the requesting test context ------------------------------------------------------------- Fixture function can accept the :py:class:`request ` object to introspect the "requesting" test function, class or module context. Further extending the previous ``smtp`` fixture example, let's read an optional server URL from the test module which uses our fixture:: # content of conftest.py import pytest import smtplib @pytest.fixture(scope="module") def smtp(request): server = getattr(request.module, "smtpserver", "merlinux.eu") smtp = smtplib.SMTP(server) def fin(): print ("finalizing %s (%s)" % (smtp, server)) smtp.close() return smtp We use the ``request.module`` attribute to optionally obtain an ``smtpserver`` attribute from the test module. If we just execute again, nothing much has changed:: $ py.test -s -q --tb=no FF 2 failed in 0.21 seconds Let's quickly create another test module that actually sets the server URL in its module namespace:: # content of test_anothersmtp.py smtpserver = "mail.python.org" # will be read by smtp fixture def test_showhelo(smtp): assert 0, smtp.helo() Running it:: $ py.test -qq --tb=short test_anothersmtp.py F ================================= FAILURES ================================= ______________________________ test_showhelo _______________________________ test_anothersmtp.py:5: in test_showhelo > assert 0, smtp.helo() E AssertionError: (250, 'mail.python.org') voila! The ``smtp`` fixture function picked up our mail server name from the module namespace. .. _`fixture-parametrize`: Parametrizing a fixture ----------------------------------------------------------------- Fixture functions can be parametrized in which case they will be called multiple times, each time executing the set of dependent tests, i. e. the tests that depend on this fixture. Test functions do usually not need to be aware of their re-running. 
Fixture parametrization helps to write exhaustive functional tests for components which themselves can be configured in multiple ways. Extending the previous example, we can flag the fixture to create two ``smtp`` fixture instances which will cause all tests using the fixture to run twice. The fixture function gets access to each parameter through the special :py:class:`request ` object:: # content of conftest.py import pytest import smtplib @pytest.fixture(scope="module", params=["merlinux.eu", "mail.python.org"]) def smtp(request): smtp = smtplib.SMTP(request.param) def fin(): print ("finalizing %s" % smtp) smtp.close() request.addfinalizer(fin) return smtp The main change is the declaration of ``params`` with :py:func:`@pytest.fixture <_pytest.python.fixture>`, a list of values for each of which the fixture function will execute and can access a value via ``request.param``. No test function code needs to change. So let's just do another run:: $ py.test -q test_module.py FFFF ================================= FAILURES ================================= __________________________ test_ehlo[merlinux.eu] __________________________ smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 assert "merlinux" in response[1] > assert 0 # for demo purposes E assert 0 test_module.py:6: AssertionError __________________________ test_noop[merlinux.eu] __________________________ smtp = def test_noop(smtp): response = smtp.noop() assert response[0] == 250 > assert 0 # for demo purposes E assert 0 test_module.py:11: AssertionError ________________________ test_ehlo[mail.python.org] ________________________ smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 > assert "merlinux" in response[1] E assert 'merlinux' in 'mail.python.org\nSIZE 25600000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN' test_module.py:5: AssertionError ----------------------------- Captured stdout ------------------------------ finalizing 
________________________ test_noop[mail.python.org] ________________________ smtp = def test_noop(smtp): response = smtp.noop() assert response[0] == 250 > assert 0 # for demo purposes E assert 0 test_module.py:11: AssertionError 4 failed in 6.58 seconds We see that our two test functions each ran twice, against the different ``smtp`` instances. Note also, that with the ``mail.python.org`` connection the second test fails in ``test_ehlo`` because a different server string is expected than what arrived. .. _`interdependent fixtures`: Modularity: using fixtures from a fixture function ---------------------------------------------------------- You can not only use fixtures in test functions but fixture functions can use other fixtures themselves. This contributes to a modular design of your fixtures and allows re-use of framework-specific fixtures across many projects. As a simple example, we can extend the previous example and instantiate an object ``app`` where we stick the already defined ``smtp`` resource into it:: # content of test_appsetup.py import pytest class App: def __init__(self, smtp): self.smtp = smtp @pytest.fixture(scope="module") def app(smtp): return App(smtp) def test_smtp_exists(app): assert app.smtp Here we declare an ``app`` fixture which receives the previously defined ``smtp`` fixture and instantiates an ``App`` object with it. Let's run it:: $ py.test -v test_appsetup.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 2 items test_appsetup.py:12: test_smtp_exists[merlinux.eu] PASSED test_appsetup.py:12: test_smtp_exists[mail.python.org] PASSED ========================= 2 passed in 5.95 seconds ========================= Due to the parametrization of ``smtp`` the test will run twice with two different ``App`` instances and respective smtp servers. 
There is no need for the ``app`` fixture to be aware of the ``smtp`` parametrization as pytest will fully analyse the fixture dependency graph. Note, that the ``app`` fixture has a scope of ``module`` and uses a module-scoped ``smtp`` fixture. The example would still work if ``smtp`` was cached on a ``session`` scope: it is fine for fixtures to use "broader" scoped fixtures but not the other way round: A session-scoped fixture could not use a module-scoped one in a meaningful way. .. _`automatic per-resource grouping`: Automatic grouping of tests by fixture instances ---------------------------------------------------------- .. regendoc: wipe pytest minimizes the number of active fixtures during test runs. If you have a parametrized fixture, then all the tests using it will first execute with one instance and then finalizers are called before the next fixture instance is created. Among other things, this eases testing of applications which create and use global state. The following example uses two parametrized funcargs, one of which is scoped on a per-module basis, and all the functions perform ``print`` calls to show the setup/teardown flow:: # content of test_module.py import pytest @pytest.fixture(scope="module", params=["mod1", "mod2"]) def modarg(request): param = request.param print "create", param def fin(): print ("fin %s" % param) return param @pytest.fixture(scope="function", params=[1,2]) def otherarg(request): return request.param def test_0(otherarg): print " test0", otherarg def test_1(modarg): print " test1", modarg def test_2(otherarg, modarg): print " test2", otherarg, modarg Let's run the tests in verbose mode and with looking at the print-output:: $ py.test -v -s test_module.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... 
collected 8 items test_module.py:15: test_0[1] test0 1 PASSED test_module.py:15: test_0[2] test0 2 PASSED test_module.py:17: test_1[mod1] create mod1 test1 mod1 PASSED test_module.py:19: test_2[1-mod1] test2 1 mod1 PASSED test_module.py:19: test_2[2-mod1] test2 2 mod1 PASSED test_module.py:17: test_1[mod2] create mod2 test1 mod2 PASSED test_module.py:19: test_2[1-mod2] test2 1 mod2 PASSED test_module.py:19: test_2[2-mod2] test2 2 mod2 PASSED ========================= 8 passed in 0.01 seconds ========================= You can see that the parametrized module-scoped ``modarg`` resource caused an ordering of test execution that lead to the fewest possible "active" resources. The finalizer for the ``mod1`` parametrized resource was executed before the ``mod2`` resource was setup. .. _`usefixtures`: using fixtures from classes, modules or projects ---------------------------------------------------------------------- .. regendoc:wipe Sometimes test functions do not directly need access to a fixture object. For example, tests may require to operate with an empty directory as the current working directory but otherwise do not care for the concrete directory. Here is how you can can use the standard `tempfile `_ and pytest fixtures to achieve it. 
We separate the creation of the fixture into a conftest.py file:: # content of conftest.py import pytest import tempfile import os @pytest.fixture() def cleandir(): newpath = tempfile.mkdtemp() os.chdir(newpath) and declare its use in a test module via a ``usefixtures`` marker:: # content of test_setenv.py import os import pytest @pytest.mark.usefixtures("cleandir") class TestDirectoryInit: def test_cwd_starts_empty(self): assert os.listdir(os.getcwd()) == [] with open("myfile", "w") as f: f.write("hello") def test_cwd_again_starts_empty(self): assert os.listdir(os.getcwd()) == [] Due to the ``usefixtures`` marker, the ``cleandir`` fixture will be required for the execution of each test method, just as if you specified a "cleandir" function argument to each of them. Let's run it to verify our fixture is activated and the tests pass:: $ py.test -q .. 2 passed in 0.01 seconds You can specify multiple fixtures like this:: @pytest.mark.usefixtures("cleandir", "anotherfixture") and you may specify fixture usage at the test module level, using a generic feature of the mark mechanism:: pytestmark = pytest.mark.usefixtures("cleandir") Lastly you can put fixtures required by all tests in your project into an ini-file:: # content of pytest.ini [pytest] usefixtures = cleandir .. _`autouse`: .. _`autouse fixtures`: autouse fixtures (xUnit setup on steroids) ---------------------------------------------------------------------- .. regendoc:wipe Occasionally, you may want to have fixtures get invoked automatically without a `usefixtures`_ or `funcargs`_ reference. As a practical example, suppose we have a database fixture which has a begin/rollback/commit architecture and we want to automatically surround each test method by a transaction and a rollback. 
Here is a dummy self-contained implementation of this idea:: # content of test_db_transact.py import pytest class DB: def __init__(self): self.intransaction = [] def begin(self, name): self.intransaction.append(name) def rollback(self): self.intransaction.pop() @pytest.fixture(scope="module") def db(): return DB() class TestClass: @pytest.fixture(autouse=True) def transact(self, request, db): db.begin(request.function.__name__) request.addfinalizer(db.rollback) def test_method1(self, db): assert db.intransaction == ["test_method1"] def test_method2(self, db): assert db.intransaction == ["test_method2"] The class-level ``transact`` fixture is marked with *autouse=true* which implies that all test methods in the class will use this fixture without a need to state it in the test function signature or with a class-level ``usefixtures`` decorator. If we run it, we get two passing tests:: $ py.test -q .. 2 passed in 0.01 seconds Here is how autouse fixtures work in other scopes: - if an autouse fixture is defined in a test module, all its test functions automatically use it. - if an autouse fixture is defined in a conftest.py file then all tests in all test modules belows its directory will invoke the fixture. - lastly, and **please use that with care**: if you define an autouse fixture in a plugin, it will be invoked for all tests in all projects where the plugin is installed. This can be useful if a fixture only anyway works in the presence of certain settings e. g. in the ini-file. Such a global fixture should always quickly determine if it should do any work and avoid expensive imports or computation otherwise. Note that the above ``transact`` fixture may very well be a fixture that you want to make available in your project without having it generally active. 
The canonical way to do that is to put the transact definition into a conftest.py file **without** using ``autouse``:: # content of conftest.py @pytest.fixture() def transact(self, request, db): db.begin() request.addfinalizer(db.rollback) and then e.g. have a TestClass using it by declaring the need:: @pytest.mark.usefixtures("transact") class TestClass: def test_method1(self): ... All test methods in this TestClass will use the transaction fixture while other test classes or functions in the module will not use it unless they also add a ``transact`` reference. Shifting (visibility of) fixture functions ---------------------------------------------------- If during implementing your tests you realize that you want to use a fixture function from multiple test files you can move it to a :ref:`conftest.py ` file or even separately installable :ref:`plugins ` without changing test code. The discovery of fixtures functions starts at test classes, then test modules, then ``conftest.py`` files and finally builtin and third party plugins. pytest-2.5.1/doc/en/_templates/0000775000175000017500000000000012254002202015730 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/_templates/globaltoc.html0000664000175000017500000000115612254002202020567 0ustar hpkhpk00000000000000

{{ _('Table Of Contents') }}

{%- if display_toc %}
{{ toc }} {%- endif %} pytest-2.5.1/doc/en/_templates/links.html0000664000175000017500000000102312254002202017732 0ustar hpkhpk00000000000000

Useful Links

pytest-2.5.1/doc/en/_templates/sidebarintro.html0000664000175000017500000000017612254002202021307 0ustar hpkhpk00000000000000

About pytest

pytest is a mature full-featured Python testing tool that helps you write better programs.

pytest-2.5.1/doc/en/_templates/layout.html0000664000175000017500000000117612254002202020140 0ustar hpkhpk00000000000000{% extends "!layout.html" %} {% block footer %} {{ super() }} {% endblock %} pytest-2.5.1/doc/en/announce/0000775000175000017500000000000012254002202015401 5ustar hpkhpk00000000000000pytest-2.5.1/doc/en/announce/release-2.1.3.txt0000664000175000017500000000226512254002202020226 0ustar hpkhpk00000000000000py.test 2.1.3: just some more fixes =========================================================================== pytest-2.1.3 is a minor backward compatible maintenance release of the popular py.test testing tool. It is commonly used for unit, functional- and integration testing. See extensive docs with examples here: http://pytest.org/ The release contains another fix to the perfected assertions introduced with the 2.1 series as well as the new possibility to customize reporting for assertion expressions on a per-directory level. If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest Thanks to the bug reporters and to Ronny Pfannschmidt, Benjamin Peterson and Floris Bruynooghe who implemented the fixes. 
best, holger krekel Changes between 2.1.2 and 2.1.3 ---------------------------------------- - fix issue79: assertion rewriting failed on some comparisons in boolops, - correctly handle zero length arguments (a la pytest '') - fix issue67 / junitxml now contains correct test durations - fix issue75 / skipping test failure on jython - fix issue77 / Allow assertrepr_compare hook to apply to a subset of tests pytest-2.5.1/doc/en/announce/release-2.4.0.txt0000664000175000017500000002202412254002202020221 0ustar hpkhpk00000000000000pytest-2.4.0: new fixture features/hooks and bug fixes =========================================================================== The just released pytest-2.4.0 brings many improvements and numerous bug fixes while remaining plugin- and test-suite compatible apart from a few supposedly very minor incompatibilities. See below for a full list of details. A few feature highlights: - new yield-style fixtures `pytest.yield_fixture `_, allowing to use existing with-style context managers in fixture functions. - improved pdb support: ``import pdb ; pdb.set_trace()`` now works without requiring prior disabling of stdout/stderr capturing. Also the ``--pdb`` options works now on collection and internal errors and we introduced a new experimental hook for IDEs/plugins to intercept debugging: ``pytest_exception_interact(node, call, report)``. - shorter monkeypatch variant to allow specifying an import path as a target, for example: ``monkeypatch.setattr("requests.get", myfunc)`` - better unittest/nose compatibility: all teardown methods are now only called if the corresponding setup method succeeded. - integrate tab-completion on command line options if you have `argcomplete `_ configured. - allow boolean expression directly with skipif/xfail if a "reason" is also specified. - a new hook ``pytest_load_initial_conftests`` allows plugins like `pytest-django `_ to influence the environment before conftest files import ``django``. 
- reporting: color the last line red or green depending if failures/errors occured or everything passed. The documentation has been updated to accomodate the changes, see `http://pytest.org `_ To install or upgrade pytest:: pip install -U pytest # or easy_install -U pytest **Many thanks to all who helped, including Floris Bruynooghe, Brianna Laugher, Andreas Pelme, Anthon van der Neut, Anatoly Bubenkoff, Vladimir Keleshev, Mathieu Agopian, Ronny Pfannschmidt, Christian Theunert and many others.** may passing tests be with you, holger krekel Changes between 2.3.5 and 2.4 ----------------------------------- known incompatibilities: - if calling --genscript from python2.7 or above, you only get a standalone script which works on python2.7 or above. Use Python2.6 to also get a python2.5 compatible version. - all xunit-style teardown methods (nose-style, pytest-style, unittest-style) will not be called if the corresponding setup method failed, see issue322 below. - the pytest_plugin_unregister hook wasn't ever properly called and there is no known implementation of the hook - so it got removed. - pytest.fixture-decorated functions cannot be generators (i.e. use yield) anymore. This change might be reversed in 2.4.1 if it causes unforeseen real-life issues. However, you can always write and return an inner function/generator and change the fixture consumer to iterate over the returned generator. This change was done in lieu of the new ``pytest.yield_fixture`` decorator, see below. new features: - experimentally introduce a new ``pytest.yield_fixture`` decorator which accepts exactly the same parameters as pytest.fixture but mandates a ``yield`` statement instead of a ``return statement`` from fixture functions. This allows direct integration with "with-style" context managers in fixture functions and generally avoids registering of finalization callbacks in favour of treating the "after-yield" as teardown code. 
Thanks Andreas Pelme, Vladimir Keleshev, Floris Bruynooghe, Ronny Pfannschmidt and many others for discussions. - allow boolean expression directly with skipif/xfail if a "reason" is also specified. Rework skipping documentation to recommend "condition as booleans" because it prevents surprises when importing markers between modules. Specifying conditions as strings will remain fully supported. - reporting: color the last line red or green depending if failures/errors occured or everything passed. thanks Christian Theunert. - make "import pdb ; pdb.set_trace()" work natively wrt capturing (no "-s" needed anymore), making ``pytest.set_trace()`` a mere shortcut. - fix issue181: --pdb now also works on collect errors (and on internal errors) . This was implemented by a slight internal refactoring and the introduction of a new hook ``pytest_exception_interact`` hook (see next item). - fix issue341: introduce new experimental hook for IDEs/terminals to intercept debugging: ``pytest_exception_interact(node, call, report)``. - new monkeypatch.setattr() variant to provide a shorter invocation for patching out classes/functions from modules: monkeypatch.setattr("requests.get", myfunc) will replace the "get" function of the "requests" module with ``myfunc``. - fix issue322: tearDownClass is not run if setUpClass failed. Thanks Mathieu Agopian for the initial fix. Also make all of pytest/nose finalizer mimick the same generic behaviour: if a setupX exists and fails, don't run teardownX. This internally introduces a new method "node.addfinalizer()" helper which can only be called during the setup phase of a node. - simplify pytest.mark.parametrize() signature: allow to pass a CSV-separated string to specify argnames. For example: ``pytest.mark.parametrize("input,expected", [(1,2), (2,3)])`` works as well as the previous: ``pytest.mark.parametrize(("input", "expected"), ...)``. - add support for setUpModule/tearDownModule detection, thanks Brian Okken. 
- integrate tab-completion on options through use of "argcomplete". Thanks Anthon van der Neut for the PR. - change option names to be hyphen-separated long options but keep the old spelling backward compatible. py.test -h will only show the hyphenated version, for example "--collect-only" but "--collectonly" will remain valid as well (for backward-compat reasons). Many thanks to Anthon van der Neut for the implementation and to Hynek Schlawack for pushing us. - fix issue 308 - allow to mark/xfail/skip individual parameter sets when parametrizing. Thanks Brianna Laugher. - call new experimental pytest_load_initial_conftests hook to allow 3rd party plugins to do something before a conftest is loaded. Bug fixes: - fix issue358 - capturing options are now parsed more properly by using a new parser.parse_known_args method. - pytest now uses argparse instead of optparse (thanks Anthon) which means that "argparse" is added as a dependency if installing into python2.6 environments or below. - fix issue333: fix a case of bad unittest/pytest hook interaction. - PR27: correctly handle nose.SkipTest during collection. Thanks Antonio Cuni, Ronny Pfannschmidt. - fix issue355: junitxml puts name="pytest" attribute to testsuite tag. - fix issue336: autouse fixture in plugins should work again. - fix issue279: improve object comparisons on assertion failure for standard datatypes and recognise collections.abc. Thanks to Brianna Laugher and Mathieu Agopian. - fix issue317: assertion rewriter support for the is_package method - fix issue335: document py.code.ExceptionInfo() object returned from pytest.raises(), thanks Mathieu Agopian. - remove implicit distribute_setup support from setup.py. - fix issue305: ignore any problems when writing pyc files. - SO-17664702: call fixture finalizers even if the fixture function partially failed (finalizers would not always be called before) - fix issue320 - fix class scope for fixtures when mixed with module-level functions. 
Thanks Anatloy Bubenkoff. - you can specify "-q" or "-qq" to get different levels of "quieter" reporting (thanks Katarzyna Jachim) - fix issue300 - Fix order of conftest loading when starting py.test in a subdirectory. - fix issue323 - sorting of many module-scoped arg parametrizations - make sessionfinish hooks execute with the same cwd-context as at session start (helps fix plugin behaviour which write output files with relative path such as pytest-cov) - fix issue316 - properly reference collection hooks in docs - fix issue 306 - cleanup of -k/-m options to only match markers/test names/keywords respectively. Thanks Wouter van Ackooy. - improved doctest counting for doctests in python modules -- files without any doctest items will not show up anymore and doctest examples are counted as separate test items. thanks Danilo Bellini. - fix issue245 by depending on the released py-1.4.14 which fixes py.io.dupfile to work with files with no mode. Thanks Jason R. Coombs. - fix junitxml generation when test output contains control characters, addressing issue267, thanks Jaap Broekhuizen - fix issue338: honor --tb style for setup/teardown errors as well. Thanks Maho. - fix issue307 - use yaml.safe_load in example, thanks Mark Eichin. - better parametrize error messages, thanks Brianna Laugher - pytest_terminal_summary(terminalreporter) hooks can now use ".section(title)" and ".line(msg)" methods to print extra information at the end of a test run. pytest-2.5.1/doc/en/announce/index.txt0000664000175000017500000000075212254002202017255 0ustar hpkhpk00000000000000 Release announcements =========================================== .. 
toctree:: :maxdepth: 2 release-2.5.1 release-2.5.0 release-2.4.2 release-2.4.1 release-2.4.0 release-2.3.5 release-2.3.4 release-2.3.3 release-2.3.2 release-2.3.1 release-2.3.0 release-2.2.4 release-2.2.2 release-2.2.1 release-2.2.0 release-2.1.3 release-2.1.2 release-2.1.1 release-2.1.0 release-2.0.3 release-2.0.2 release-2.0.1 release-2.0.0 pytest-2.5.1/doc/en/announce/release-2.2.0.txt0000664000175000017500000001034712254002202020224 0ustar hpkhpk00000000000000py.test 2.2.0: test marking++, parametrization++ and duration profiling =========================================================================== pytest-2.2.0 is a test-suite compatible release of the popular py.test testing tool. Plugins might need upgrades. It comes with these improvements: * easier and more powerful parametrization of tests: - new @pytest.mark.parametrize decorator to run tests with different arguments - new metafunc.parametrize() API for parametrizing arguments independently - see examples at http://pytest.org/latest/example/parametrize.html - NOTE that parametrize() related APIs are still a bit experimental and might change in future releases. * improved handling of test markers and refined marking mechanism: - "-m markexpr" option for selecting tests according to their mark - a new "markers" ini-variable for registering test markers for your project - the new "--strict" bails out with an error if using unregistered markers. - see examples at http://pytest.org/latest/example/markers.html * duration profiling: new "--duration=N" option showing the N slowest test execution or setup/teardown calls. This is most useful if you want to find out where your slowest test code is. * also 2.2.0 performs more eager calling of teardown/finalizers functions resulting in better and more accurate reporting when they fail Besides there is the usual set of bug fixes along with a cleanup of pytest's own test suite allowing it to run on a wider range of environments. 
For general information, see extensive docs with examples here: http://pytest.org/ If you want to install or upgrade pytest you might just type:: pip install -U pytest # or easy_install -U pytest Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, Alfredo Deza and all who gave feedback or sent bug reports. best, holger krekel notes on incompatibility ------------------------------ While test suites should work unchanged you might need to upgrade plugins: * You need a new version of the pytest-xdist plugin (1.7) for distributing test runs. * Other plugins might need an upgrade if they implement the ``pytest_runtest_logreport`` hook which now is called unconditionally for the setup/teardown fixture phases of a test. You may choose to ignore setup/teardown failures by inserting "if rep.when != 'call': return" or something similar. Note that most code probably "just" works because the hook was already called for failing setup/teardown phases of a test so a plugin should have been ready to grok such reports already. Changes between 2.1.3 and 2.2.0 ---------------------------------------- - fix issue90: introduce eager tearing down of test items so that teardown function are called earlier. - add an all-powerful metafunc.parametrize function which allows to parametrize test function arguments in multiple steps and therefore from independent plugins and places. - add a @pytest.mark.parametrize helper which allows to easily call a test function with different argument values. - Add examples to the "parametrize" example page, including a quick port of Test scenarios and the new parametrize function and decorator. - introduce registration for "pytest.mark.*" helpers via ini-files or through plugin hooks. Also introduce a "--strict" option which will treat unregistered markers as errors allowing to avoid typos and maintain a well described set of markers for your test suite. See examples at http://pytest.org/latest/mark.html and its links. 
- issue50: introduce "-m marker" option to select tests based on markers (this is a stricter and more predictable version of "-k" in that "-m" only matches complete markers and has more obvious rules for and/or semantics. - new feature to help optimizing the speed of your tests: --durations=N option for displaying N slowest test calls and setup/teardown methods. - fix issue87: --pastebin now works with python3 - fix issue89: --pdb with unexpected exceptions in doctest work more sensibly - fix and cleanup pytest's own test suite to not leak FDs - fix issue83: link to generated funcarg list - fix issue74: pyarg module names are now checked against imp.find_module false positives - fix compatibility with twisted/trial-11.1.0 use cases pytest-2.5.1/doc/en/announce/release-2.2.1.txt0000664000175000017500000000303112254002202020215 0ustar hpkhpk00000000000000pytest-2.2.1: bug fixes, perfect teardowns =========================================================================== pytest-2.2.1 is a minor backward-compatible release of the the py.test testing tool. It contains bug fixes and little improvements, including documentation fixes. If you are using the distributed testing pluginmake sure to upgrade it to pytest-xdist-1.8. For general information see here: http://pytest.org/ To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest Special thanks for helping on this release to Ronny Pfannschmidt, Jurko Gospodnetic and Ralf Schmitt. best, holger krekel Changes between 2.2.0 and 2.2.1 ---------------------------------------- - fix issue99 (in pytest and py) internallerrors with resultlog now produce better output - fixed by normalizing pytest_internalerror input arguments. 
- fix issue97 / traceback issues (in pytest and py) improve traceback output in conjunction with jinja2 and cython which hack tracebacks - fix issue93 (in pytest and pytest-xdist) avoid "delayed teardowns": the final test in a test node will now run its teardown directly instead of waiting for the end of the session. Thanks Dave Hunt for the good reporting and feedback. The pytest_runtest_protocol as well as the pytest_runtest_teardown hooks now have "nextitem" available which will be None indicating the end of the test run. - fix collection crash due to unknown-source collected items, thanks to Ralf Schmitt (fixed by depending on a more recent pylib) pytest-2.5.1/doc/en/announce/release-2.2.4.txt0000664000175000017500000000266012254002202020227 0ustar hpkhpk00000000000000pytest-2.2.4: bug fixes, better junitxml/unittest/python3 compat =========================================================================== pytest-2.2.4 is a minor backward-compatible release of the versatile py.test testing tool. It contains bug fixes and a few refinements to junitxml reporting, better unittest- and python3 compatibility. For general information see here: http://pytest.org/ To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest Special thanks for helping on this release to Ronny Pfannschmidt and Benjamin Peterson and the contributors of issues. 
best, holger krekel Changes between 2.2.3 and 2.2.4 ----------------------------------- - fix error message for rewritten assertions involving the % operator - fix issue 126: correctly match all invalid xml characters for junitxml binary escape - fix issue with unittest: now @unittest.expectedFailure markers should be processed correctly (you can also use @pytest.mark markers) - document integration with the extended distribute/setuptools test commands - fix issue 140: propperly get the real functions of bound classmethods for setup/teardown_class - fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net - fix issue #143: call unconfigure/sessionfinish always when configure/sessionstart where called - fix issue #144: better mangle test ids to junitxml classnames - upgrade distribute_setup.py to 0.6.27 pytest-2.5.1/doc/en/announce/release-2.5.0.txt0000664000175000017500000001621412254002202020226 0ustar hpkhpk00000000000000pytest-2.5.0: now down to ZERO reported bugs! =========================================================================== pytest-2.5.0 is a big fixing release, the result of two community bug fixing days plus numerous additional works from many people and reporters. The release should be fully compatible to 2.4.2, existing plugins and test suites. We aim at maintaining this level of ZERO reported bugs because it's no fun if your testing tool has bugs, is it? Under a condition, though: when submitting a bug report please provide clear information about the circumstances and a simple example which reproduces the problem. The issue tracker is of course not empty now. We have many remaining "enhacement" issues which we'll hopefully can tackle in 2014 with your help. For those who use older Python versions, please note that pytest is not automatically tested on python2.5 due to virtualenv, setuptools and tox not supporting it anymore. 
Manual verification shows that it mostly works fine but it's not going to be part of the automated release process and thus likely to break in the future. As usual, current docs are at http://pytest.org and you can upgrade from pypi via:: pip install -U pytest Particular thanks for helping with this release go to Anatoly Bubenkoff, Floris Bruynooghe, Marc Abramowitz, Ralph Schmitt, Ronny Pfannschmidt, Donald Stufft, James Lan, Rob Dennis, Jason R. Coombs, Mathieu Agopian, Virgil Dupras, Bruno Oliveira, Alex Gaynor and others. have fun, holger krekel 2.5.0 ----------------------------------- - dropped python2.5 from automated release testing of pytest itself which means it's probably going to break soon (but still works with this release we believe). - simplified and fixed implementation for calling finalizers when parametrized fixtures or function arguments are involved. finalization is now performed lazily at setup time instead of in the "teardown phase". While this might sound odd at first, it helps to ensure that we are correctly handling setup/teardown even in complex code. User-level code should not be affected unless it's implementing the pytest_runtest_teardown hook and expecting certain fixture instances are torn down within (very unlikely and would have been unreliable anyway). - PR90: add --color=yes|no|auto option to force terminal coloring mode ("auto" is default). Thanks Marc Abramowitz. - fix issue319 - correctly show unicode in assertion errors. Many thanks to Floris Bruynooghe for the complete PR. Also means we depend on py>=1.4.19 now. - fix issue396 - correctly sort and finalize class-scoped parametrized tests independently from number of methods on the class. 
- refix issue323 in a better way -- parametrization should now never cause Runtime Recursion errors because the underlying algorithm for re-ordering tests per-scope/per-fixture is not recursive anymore (it was tail-call recursive before which could lead to problems for more than >966 non-function scoped parameters). - fix issue290 - there is preliminary support now for parametrizing with repeated same values (sometimes useful to to test if calling a second time works as with the first time). - close issue240 - document precisely how pytest module importing works, discuss the two common test directory layouts, and how it interacts with PEP420-namespace packages. - fix issue246 fix finalizer order to be LIFO on independent fixtures depending on a parametrized higher-than-function scoped fixture. (was quite some effort so please bear with the complexity of this sentence :) Thanks Ralph Schmitt for the precise failure example. - fix issue244 by implementing special index for parameters to only use indices for paramentrized test ids - fix issue287 by running all finalizers but saving the exception from the first failing finalizer and re-raising it so teardown will still have failed. We reraise the first failing exception because it might be the cause for other finalizers to fail. - fix ordering when mock.patch or other standard decorator-wrappings are used with test methods. This fixues issue346 and should help with random "xdist" collection failures. Thanks to Ronny Pfannschmidt and Donald Stufft for helping to isolate it. - fix issue357 - special case "-k" expressions to allow for filtering with simple strings that are not valid python expressions. Examples: "-k 1.3" matches all tests parametrized with 1.3. "-k None" filters all tests that have "None" in their name and conversely "-k 'not None'". Previously these examples would raise syntax errors. 
- fix issue384 by removing the trial support code since the unittest compat enhancements allow trial to handle it on its own - don't hide an ImportError when importing a plugin produces one. fixes issue375. - fix issue275 - allow usefixtures and autouse fixtures for running doctest text files. - fix issue380 by making --resultlog only rely on longrepr instead of the "reprcrash" attribute which only exists sometimes. - address issue122: allow @pytest.fixture(params=iterator) by exploding into a list early on. - fix pexpect-3.0 compatibility for pytest's own tests. (fixes issue386) - allow nested parametrize-value markers, thanks James Lan for the PR. - fix unicode handling with new monkeypatch.setattr(import_path, value) API. Thanks Rob Dennis. Fixes issue371. - fix unicode handling with junitxml, fixes issue368. - In assertion rewriting mode on Python 2, fix the detection of coding cookies. See issue #330. - make "--runxfail" turn imperative pytest.xfail calls into no ops (it already did neutralize pytest.mark.xfail markers) - refine pytest / pkg_resources interactions: The AssertionRewritingHook PEP302 compliant loader now registers itself with setuptools/pkg_resources properly so that the pkg_resources.resource_stream method works properly. Fixes issue366. Thanks for the investigations and full PR to Jason R. Coombs. - pytestconfig fixture is now session-scoped as it is the same object during the whole test run. Fixes issue370. - avoid one surprising case of marker malfunction/confusion:: @pytest.mark.some(lambda arg: ...) def test_function(): would not work correctly because pytest assumes @pytest.mark.some gets a function to be decorated already. We now at least detect if this arg is an lambda and thus the example will work. Thanks Alex Gaynor for bringing it up. - xfail a test on pypy that checks wrong encoding/ascii (pypy does not error out). fixes issue385. 
- internally make varnames() deal with classes's __init__, although it's not needed by pytest itself atm. Also fix caching. Fixes issue376. - fix issue221 - handle importing of namespace-package with no __init__.py properly. - refactor internal FixtureRequest handling to avoid monkeypatching. One of the positive user-facing effects is that the "request" object can now be used in closures. - fixed version comparison in pytest.importskip(modname, minverstring) - fix issue377 by clarifying in the nose-compat docs that pytest does not duplicate the unittest-API into the "plain" namespace. - fix verbose reporting for @mock'd test functions pytest-2.5.1/doc/en/announce/release-2.3.4.txt0000664000175000017500000000323112254002202020223 0ustar hpkhpk00000000000000pytest-2.3.4: stabilization, more flexible selection via "-k expr" =========================================================================== pytest-2.3.4 is a small stabilization release of the py.test tool which offers uebersimple assertions, scalable fixture mechanisms and deep customization for testing with Python. This release comes with the following fixes and features: - make "-k" option accept an expressions the same as with "-m" so that one can write: -k "name1 or name2" etc. This is a slight usage incompatibility if you used special syntax like "TestClass.test_method" which you now need to write as -k "TestClass and test_method" to match a certain method in a certain test class. 
- allow to dynamically define markers via item.keywords[...]=assignment integrating with "-m" option - yielded test functions will now have autouse-fixtures active but cannot accept fixtures as funcargs - it's anyway recommended to rather use the post-2.0 parametrize features instead of yield, see: http://pytest.org/latest/example/parametrize.html - fix autouse-issue where autouse-fixtures would not be discovered if defined in a a/conftest.py file and tests in a/tests/test_some.py - fix issue226 - LIFO ordering for fixture teardowns - fix issue224 - invocations with >256 char arguments now work - fix issue91 - add/discuss package/directory level setups in example - fixes related to autouse discovery and calling Thanks in particular to Thomas Waldmann for spotting and reporting issues. See http://pytest.org/ for general information. To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest best, holger krekel pytest-2.5.1/doc/en/announce/release-2.3.2.txt0000664000175000017500000000334612254002202020230 0ustar hpkhpk00000000000000pytest-2.3.2: some fixes and more traceback-printing speed =========================================================================== pytest-2.3.2 is a another stabilization release: - issue 205: fixes a regression with conftest detection - issue 208/29: fixes traceback-printing speed in some bad cases - fix teardown-ordering for parametrized setups - fix unittest and trial compat behaviour with respect to runTest() methods - issue 206 and others: some improvements to packaging - fix issue127 and others: improve some docs See http://pytest.org/ for general information. 
To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest best, holger krekel Changes between 2.3.1 and 2.3.2 ----------------------------------- - fix issue208 and fix issue29 use new py version to avoid long pauses when printing tracebacks in long modules - fix issue205 - conftests in subdirs customizing pytest_pycollect_makemodule and pytest_pycollect_makeitem now work properly - fix teardown-ordering for parametrized setups - fix issue127 - better documentation for pytest_addoption and related objects. - fix unittest behaviour: TestCase.runtest only called if there are test methods defined - improve trial support: don't collect its empty unittest.TestCase.runTest() method - "python setup.py test" now works with pytest itself - fix/improve internal/packaging related bits: - exception message check of test_nose.py now passes on python33 as well - issue206 - fix test_assertrewrite.py to work when a global PYTHONDONTWRITEBYTECODE=1 is present - add tox.ini to pytest distribution so that ignore-dirs and others config bits are properly distributed for maintainers who run pytest-own tests pytest-2.5.1/doc/en/announce/release-2.0.0.txt0000664000175000017500000001203712254002202020220 0ustar hpkhpk00000000000000py.test 2.0.0: asserts++, unittest++, reporting++, config++, docs++ =========================================================================== Welcome to pytest-2.0.0, a major new release of "py.test", the rapid easy Python testing tool. There are many new features and enhancements, see below for summary and detailed lists. A lot of long-deprecated code has been removed, resulting in a much smaller and cleaner implementation. See the new docs with examples here: http://pytest.org/2.0.0/index.html A note on packaging: pytest used to part of the "py" distribution up until version py-1.3.4 but this has changed now: pytest-2.0.0 only contains py.test related code and is expected to be backward-compatible to existing test code. 
If you want to install pytest, just type one of:: pip install -U pytest easy_install -U pytest Many thanks to all issue reporters and people asking questions or complaining. Particular thanks to Floris Bruynooghe and Ronny Pfannschmidt for their great coding contributions and many others for feedback and help. best, holger krekel New Features ----------------------- - new invocations through Python interpreter and from Python:: python -m pytest # on all pythons >= 2.5 or from a python program:: import pytest ; pytest.main(arglist, pluginlist) see http://pytest.org/2.0.0/usage.html for details. - new and better reporting information in assert expressions if comparing lists, sequences or strings. see http://pytest.org/2.0.0/assert.html#newreport - new configuration through ini-files (setup.cfg or tox.ini recognized), for example:: [pytest] norecursedirs = .hg data* # don't ever recurse in such dirs addopts = -x --pyargs # add these command line options by default see http://pytest.org/2.0.0/customize.html - improved standard unittest support. In general py.test should now better be able to run custom unittest.TestCases like twisted trial or Django based TestCases. Also you can now run the tests of an installed 'unittest' package with py.test:: py.test --pyargs unittest - new "-q" option which decreases verbosity and prints a more nose/unittest-style "dot" output. - many many more detailed improvements details Fixes ----------------------- - fix issue126 - introduce py.test.set_trace() to trace execution via PDB during the running of tests even if capturing is ongoing. - fix issue124 - make reporting more resilient against tests opening files on filedescriptor 1 (stdout). - fix issue109 - sibling conftest.py files will not be loaded. (and Directory collectors cannot be customized anymore from a Directory's conftest.py - this needs to happen at least one level up). 
- fix issue88 (finding custom test nodes from command line arg) - fix issue93 stdout/stderr is captured while importing conftest.py - fix bug: unittest collected functions now also can have "pytestmark" applied at class/module level Important Notes -------------------- * The usual way in pre-2.0 times to use py.test in python code was to import "py" and then e.g. use "py.test.raises" for the helper. This remains valid and is not planned to be deprecated. However, in most examples and internal code you'll find "import pytest" and "pytest.raises" used as the recommended default way. * pytest now first performs collection of the complete test suite before running any test. This changes for example the semantics of when pytest_collectstart/pytest_collectreport are called. Some plugins may need upgrading. * The pytest package consists of a 400 LOC core.py and about 20 builtin plugins, summing up to roughly 5000 LOCs, including docstrings. To be fair, it also uses generic code from the "pylib", and the new "py" package to help with filesystem and introspection/code manipulation. (Incompatible) Removals ----------------------------- - py.test.config is now only available if you are in a test run. - the following (mostly already deprecated) functionality was removed: - removed support for Module/Class/... collection node definitions in conftest.py files. They will cause nothing special. - removed support for calling the pre-1.0 collection API of "run()" and "join" - removed reading option values from conftest.py files or env variables. This can now be done much much better and easier through the ini-file mechanism and the "addopts" entry in particular. - removed the "disabled" attribute in test classes. Use the skipping and pytestmark mechanism to skip or xfail a test class. - py.test.collect.Directory does not exist anymore and it is not possible to provide an own "Directory" object. If you have used this and don't know what to do, get in contact. 
We'll figure something out. Note that pytest_collect_directory() is still called but any return value will be ignored. This allows to keep old code working that performed for example "py.test.skip()" in collect() to prevent recursion into directory trees if a certain dependency or command line option is missing. see :ref:`changelog` for more detailed changes. pytest-2.5.1/doc/en/announce/release-2.1.1.txt0000664000175000017500000000305412254002202020221 0ustar hpkhpk00000000000000py.test 2.1.1: assertion fixes and improved junitxml output =========================================================================== pytest-2.1.1 is a backward compatible maintenance release of the popular py.test testing tool. See extensive docs with examples here: http://pytest.org/ Most bug fixes address remaining issues with the perfected assertions introduced with 2.1.0 - many thanks to the bug reporters and to Benjamin Peterson for helping to fix them. Also, junitxml output now produces system-out/err tags which lead to better displays of tracebacks with Jenkins. Also a quick note to package maintainers and others interested: there now is a "pytest" man page which can be generated with "make man" in doc/. 
If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest best, holger krekel / http://merlinux.eu Changes between 2.1.0 and 2.1.1 ---------------------------------------------- - fix issue64 / pytest.set_trace now works within pytest_generate_tests hooks - fix issue60 / fix error conditions involving the creation of __pycache__ - fix issue63 / assertion rewriting on inserts involving strings containing '%' - fix assertion rewriting on calls with a ** arg - don't cache rewritten modules if bytecode generation is disabled - fix assertion rewriting in read-only directories - fix issue59: provide system-out/err tags for junitxml output - fix issue61: assertion rewriting on boolean operations with 3 or more operands - you can now build a man page with "cd doc ; make man" pytest-2.5.1/doc/en/announce/release-2.3.0.txt0000664000175000017500000001264612254002202020231 0ustar hpkhpk00000000000000pytest-2.3: improved fixtures / better unittest integration ============================================================================= pytest-2.3 comes with many major improvements for fixture/funcarg management and parametrized testing in Python. It is now easier, more efficient and more predicatable to re-run the same tests with different fixture instances. Also, you can directly declare the caching "scope" of fixtures so that dependent tests throughout your whole test suite can re-use database or other expensive fixture objects with ease. Lastly, it's possible for fixture functions (formerly known as funcarg factories) to use other fixtures, allowing for a completely modular and re-useable fixture design. 
For detailed info and tutorial-style examples, see: http://pytest.org/latest/fixture.html Moreover, there is now support for using pytest fixtures/funcargs with unittest-style suites, see here for examples: http://pytest.org/latest/unittest.html Besides, more unittest-test suites are now expected to "simply work" with pytest. All changes are backward compatible and you should be able to continue to run your test suites and 3rd party plugins that worked with pytest-2.2.4. If you are interested in the precise reasoning (including examples) of the pytest-2.3 fixture evolution, please consult http://pytest.org/latest/funcarg_compare.html For general info on installation and getting started: http://pytest.org/latest/getting-started.html Docs and PDF access as usual at: http://pytest.org and more details for those already in the knowing of pytest can be found in the CHANGELOG below. Particular thanks for this release go to Floris Bruynooghe, Alex Okrushko Carl Meyer, Ronny Pfannschmidt, Benjamin Peterson and Alex Gaynor for helping to get the new features right and well integrated. Ronny and Floris also helped to fix a number of bugs and yet more people helped by providing bug reports. have fun, holger krekel Changes between 2.2.4 and 2.3.0 ----------------------------------- - fix issue202 - better automatic names for parametrized test functions - fix issue139 - introduce @pytest.fixture which allows direct scoping and parametrization of funcarg factories. Introduce new @pytest.setup marker to allow the writing of setup functions which accept funcargs. 
- fix issue198 - conftest fixtures were not found on windows32 in some circumstances with nested directory structures due to path manipulation issues - fix issue193 skip test functions with were parametrized with empty parameter sets - fix python3.3 compat, mostly reporting bits that previously depended on dict ordering - introduce re-ordering of tests by resource and parametrization setup which takes precedence to the usual file-ordering - fix issue185 monkeypatching time.time does not cause pytest to fail - fix issue172 duplicate call of pytest.setup-decoratored setup_module functions - fix junitxml=path construction so that if tests change the current working directory and the path is a relative path it is constructed correctly from the original current working dir. - fix "python setup.py test" example to cause a proper "errno" return - fix issue165 - fix broken doc links and mention stackoverflow for FAQ - catch unicode-issues when writing failure representations to terminal to prevent the whole session from crashing - fix xfail/skip confusion: a skip-mark or an imperative pytest.skip will now take precedence before xfail-markers because we can't determine xfail/xpass status in case of a skip. see also: http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get - always report installed 3rd party plugins in the header of a test run - fix issue160: a failing setup of an xfail-marked tests should be reported as xfail (not xpass) - fix issue128: show captured output when capsys/capfd are used - fix issue179: propperly show the dependency chain of factories - pluginmanager.register(...) now raises ValueError if the plugin has been already registered or the name is taken - fix issue159: improve http://pytest.org/latest/faq.html especially with respect to the "magic" history, also mention pytest-django, trial and unittest integration. - make request.keywords and node.keywords writable. 
All descendant collection nodes will see keyword values. Keywords are dictionaries containing markers and other info. - fix issue 178: xml binary escapes are now wrapped in py.xml.raw - fix issue 176: correctly catch the builtin AssertionError even when we replaced AssertionError with a subclass on the python level - factory discovery no longer fails with magic global callables that provide no sane __code__ object (mock.call for example) - fix issue 182: testdir.inprocess_run now considers passed plugins - fix issue 188: ensure sys.exc_info is clear on python2 before calling into a test - fix issue 191: add unittest TestCase runTest method support - fix issue 156: monkeypatch correctly handles class level descriptors - reporting refinements: - pytest_report_header now receives a "startdir" so that you can use startdir.bestrelpath(yourpath) to show nice relative path - allow plugins to implement both pytest_report_header and pytest_sessionstart (sessionstart is invoked first). - don't show deselected reason line if there is none - py.test -vv will show all of assert comparisations instead of truncating pytest-2.5.1/doc/en/announce/release-2.1.2.txt0000664000175000017500000000240512254002202020221 0ustar hpkhpk00000000000000py.test 2.1.2: bug fixes and fixes for jython =========================================================================== pytest-2.1.2 is a minor backward compatible maintenance release of the popular py.test testing tool. pytest is commonly used for unit, functional- and integration testing. See extensive docs with examples here: http://pytest.org/ Most bug fixes address remaining issues with the perfected assertions introduced in the 2.1 series - many thanks to the bug reporters and to Benjamin Peterson for helping to fix them. pytest should also work better with Jython-2.5.1 (and Jython trunk). 
If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest best, holger krekel / http://merlinux.eu Changes between 2.1.1 and 2.1.2 ---------------------------------------- - fix assertion rewriting on files with windows newlines on some Python versions - refine test discovery by package/module name (--pyargs), thanks Florian Mayer - fix issue69 / assertion rewriting fixed on some boolean operations - fix issue68 / packages now work with assertion rewriting - fix issue66: use different assertion rewriting caches when the -O option is passed - don't try assertion rewriting on Jython, use reinterp pytest-2.5.1/doc/en/announce/release-2.1.0.txt0000664000175000017500000000415112254002202020217 0ustar hpkhpk00000000000000py.test 2.1.0: perfected assertions and bug fixes =========================================================================== Welcome to the release of pytest-2.1, a mature testing tool for Python, supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See the improved extensive docs (now also as PDF!) with tested examples here: http://pytest.org/ The single biggest news about this release are **perfected assertions** courtesy of Benjamin Peterson. You can now safely use ``assert`` statements in test modules without having to worry about side effects or python optimization ("-OO") options. This is achieved by rewriting assert statements in test modules upon import, using a PEP302 hook. See http://pytest.org/assert.html#advanced-assertion-introspection for detailed information. The work has been partly sponsored by my company, merlinux GmbH. For further details on bug fixes and smaller enhancements see below. 
If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest best, holger krekel / http://merlinux.eu Changes between 2.0.3 and 2.1.0 ---------------------------------------------- - fix issue53 call nosestyle setup functions with correct ordering - fix issue58 and issue59: new assertion code fixes - merge Benjamin's assertionrewrite branch: now assertions for test modules on python 2.6 and above are done by rewriting the AST and saving the pyc file before the test module is imported. see doc/assert.txt for more info. - fix issue43: improve doctests with better traceback reporting on unexpected exceptions - fix issue47: timing output in junitxml for test cases is now correct - fix issue48: typo in MarkInfo repr leading to exception - fix issue49: avoid confusing error when initialization partially fails - fix issue44: env/username expansion for junitxml file path - show releaselevel information in test runs for pypy - reworked doc pages for better navigation and PDF generation - report KeyboardInterrupt even if interrupted during session startup - fix issue 35 - provide PDF doc version and download link from index page pytest-2.5.1/doc/en/announce/release-2.3.5.txt0000664000175000017500000000635312254002202020234 0ustar hpkhpk00000000000000pytest-2.3.5: bug fixes and little improvements =========================================================================== pytest-2.3.5 is a maintenance release with many bug fixes and little improvements. See the changelog below for details. No backward compatibility issues are foreseen and all plugins which worked with the prior version are expected to work unmodified. 
Speaking of which, a few interesting new plugins saw the light last month: - pytest-instafail: show failure information while tests are running - pytest-qt: testing of GUI applications written with QT/Pyside - pytest-xprocess: managing external processes across test runs - pytest-random: randomize test ordering And several others like pytest-django saw maintenance releases. For a more complete list, check out https://pypi.python.org/pypi?%3Aaction=search&term=pytest&submit=search. For general information see: http://pytest.org/ To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest Particular thanks to Floris, Ronny, Benjamin and the many bug reporters and fix providers. may the fixtures be with you, holger krekel Changes between 2.3.4 and 2.3.5 ----------------------------------- - never consider a fixture function for test function collection - allow re-running of test items / helps to fix pytest-reruntests plugin and also help to keep less fixture/resource references alive - put captured stdout/stderr into junitxml output even for passing tests (thanks Adam Goucher) - Issue 265 - integrate nose setup/teardown with setupstate so it doesnt try to teardown if it did not setup - issue 271 - dont write junitxml on slave nodes - Issue 274 - dont try to show full doctest example when doctest does not know the example location - issue 280 - disable assertion rewriting on buggy CPython 2.6.0 - inject "getfixture()" helper to retrieve fixtures from doctests, thanks Andreas Zeidler - issue 259 - when assertion rewriting, be consistent with the default source encoding of ASCII on Python 2 - issue 251 - report a skip instead of ignoring classes with init - issue250 unicode/str mixes in parametrization names and values now works - issue257, assertion-triggered compilation of source ending in a comment line doesn't blow up in python2.5 (fixed through py>=1.4.13.dev6) - fix --genscript option to generate standalone scripts that also work with python3.3 
(importer ordering) - issue171 - in assertion rewriting, show the repr of some global variables - fix option help for "-k" - move long description of distribution into README.rst - improve docstring for metafunc.parametrize() - fix bug where using capsys with pytest.set_trace() in a test function would break when looking at capsys.readouterr() - allow to specify prefixes starting with "_" when customizing python_functions test discovery. (thanks Graham Horler) - improve PYTEST_DEBUG tracing output by puting extra data on a new lines with additional indent - ensure OutcomeExceptions like skip/fail have initialized exception attributes - issue 260 - don't use nose special setup on plain unittest cases - fix issue134 - print the collect errors that prevent running specified test items - fix issue266 - accept unicode in MarkEvaluator expressions pytest-2.5.1/doc/en/announce/release-2.0.3.txt0000664000175000017500000000243412254002202020223 0ustar hpkhpk00000000000000py.test 2.0.3: bug fixes and speed ups =========================================================================== Welcome to pytest-2.0.3, a maintenance and bug fix release of pytest, a mature testing tool for Python, supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See the extensive docs with tested examples here: http://pytest.org/ If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest There also is a bugfix release 1.6 of pytest-xdist, the plugin that enables seemless distributed and "looponfail" testing for Python. 
best, holger krekel Changes between 2.0.2 and 2.0.3 ---------------------------------------------- - fix issue38: nicer tracebacks on calls to hooks, particularly early configure/sessionstart ones - fix missing skip reason/meta information in junitxml files, reported via http://lists.idyll.org/pipermail/testing-in-python/2011-March/003928.html - fix issue34: avoid collection failure with "test" prefixed classes deriving from object. - don't require zlib (and other libs) for genscript plugin without --genscript actually being used. - speed up skips (by not doing a full traceback represenation internally) - fix issue37: avoid invalid characters in junitxml's output pytest-2.5.1/doc/en/announce/release-2.3.1.txt0000664000175000017500000000214012254002202020216 0ustar hpkhpk00000000000000pytest-2.3.1: fix regression with factory functions =========================================================================== pytest-2.3.1 is a quick follow-up release: - fix issue202 - regression with fixture functions/funcarg factories: using "self" is now safe again and works as in 2.2.4. Thanks to Eduard Schettino for the quick bug report. - disable pexpect pytest self tests on Freebsd - thanks Koob for the quick reporting - fix/improve interactive docs with --markers See http://pytest.org/ for general information. To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest best, holger krekel Changes between 2.3.0 and 2.3.1 ----------------------------------- - fix issue202 - fix regression: using "self" from fixture functions now works as expected (it's the same "self" instance that a test method which uses the fixture sees) - skip pexpect using tests (test_pdb.py mostly) on freebsd* systems due to pexpect not supporting it properly (hanging) - link to web pages from --markers output which provides help for pytest.mark.* usage. 
pytest-2.5.1/doc/en/announce/release-2.4.2.txt0000664000175000017500000000262712254002202020232 0ustar hpkhpk00000000000000pytest-2.4.2: colorama on windows, plugin/tmpdir fixes =========================================================================== pytest-2.4.2 is another bug-fixing release: - on Windows require colorama and a newer py lib so that py.io.TerminalWriter() now uses colorama instead of its own ctypes hacks. (fixes issue365) thanks Paul Moore for bringing it up. - fix "-k" matching of tests where "repr" and "attr" and other names would cause wrong matches because of an internal implementation quirk (don't ask) which is now properly implemented. fixes issue345. - avoid tmpdir fixture to create too long filenames especially when parametrization is used (issue354) - fix pytest-pep8 and pytest-flakes / pytest interactions (collection names in mark plugin was assuming an item always has a function which is not true for those plugins etc.) Thanks Andi Zeidler. - introduce node.get_marker/node.add_marker API for plugins like pytest-pep8 and pytest-flakes to avoid the messy details of the node.keywords pseudo-dicts. Adapated docs. - remove attempt to "dup" stdout at startup as it's icky. the normal capturing should catch enough possibilities of tests messing up standard FDs. - add pluginmanager.do_configure(config) as a link to config.do_configure() for plugin-compatibility as usual, docs at http://pytest.org and upgrades via:: pip install -U pytest have fun, holger krekel pytest-2.5.1/doc/en/announce/release-2.3.3.txt0000664000175000017500000000405412254002202020226 0ustar hpkhpk00000000000000pytest-2.3.3: integration fixes, py24 suport, ``*/**`` shown in traceback =========================================================================== pytest-2.3.3 is a another stabilization release of the py.test tool which offers uebersimple assertions, scalable fixture mechanisms and deep customization for testing with Python. 
Particularly, this release provides: - integration fixes and improvements related to flask, numpy, nose, unittest, mock - makes pytest work on py24 again (yes, people sometimes still need to use it) - show ``*,**`` args in pytest tracebacks Thanks to Manuel Jacob, Thomas Waldmann, Ronny Pfannschmidt, Pavel Repin and Andreas Taumoefolau for providing patches and all for the issues. See http://pytest.org/ for general information. To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest best, holger krekel Changes between 2.3.2 and 2.3.3 ----------------------------------- - fix issue214 - parse modules that contain special objects like e. g. flask's request object which blows up on getattr access if no request is active. thanks Thomas Waldmann. - fix issue213 - allow to parametrize with values like numpy arrays that do not support an __eq__ operator - fix issue215 - split test_python.org into multiple files - fix issue148 - @unittest.skip on classes is now recognized and avoids calling setUpClass/tearDownClass, thanks Pavel Repin - fix issue209 - reintroduce python2.4 support by depending on newer pylib which re-introduced statement-finding for pre-AST interpreters - nose support: only call setup if its a callable, thanks Andrew Taumoefolau - fix issue219 - add py2.4-3.3 classifiers to TROVE list - in tracebacks *,** arg values are now shown next to normal arguments (thanks Manuel Jacob) - fix issue217 - support mock.patch with pytest's fixtures - note that you need either mock-1.0.1 or the python3.3 builtin unittest.mock. - fix issue127 - improve documentation for pytest_addoption() and add a ``config.getoption(name)`` helper function for consistency. 
pytest-2.5.1/doc/en/announce/release-2.5.1.txt0000664000175000017500000000327212254002202020227 0ustar hpkhpk00000000000000pytest-2.5.1: fixes and new home page styling =========================================================================== pytest is a mature Python testing tool with more than a 1000 tests against itself, passing on many different interpreters and platforms. The 2.5.1 release maintains the "zero-reported-bugs" promise by fixing the three bugs reported since the last release a few days ago. It also features a new home page styling implemented by Tobias Bieniek, based on the flask theme from Armin Ronacher: http://pytest.org If you have anything more to improve styling and docs, we'd be very happy to merge further pull requests. On the coding side, the release also contains a little enhancement to fixture decorators allowing to directly influence generation of test ids, thanks to Floris Bruynooghe. Other thanks for helping with this release go to Anatoly Bubenkoff and Ronny Pfannschmidt. As usual, you can upgrade from pypi via:: pip install -U pytest have fun and a nice remaining "bug-free" time of the year :) holger krekel 2.5.1 ----------------------------------- - merge new documentation styling PR from Tobias Bieniek. - fix issue403: allow parametrize of multiple same-name functions within a collection node. Thanks Andreas Kloeckner and Alex Gaynor for reporting and analysis. - Allow parameterized fixtures to specify the ID of the parameters by adding an ids argument to pytest.fixture() and pytest.yield_fixture(). Thanks Floris Bruynooghe. - fix issue404 by always using the binary xml escape in the junitxml plugin. Thanks Ronny Pfannschmidt. - fix issue407: fix addoption docstring to point to argparse instead of optparse. Thanks Daniel D. Wright. 
pytest-2.5.1/doc/en/announce/release-2.2.2.txt0000664000175000017500000000312712254002202020224 0ustar hpkhpk00000000000000pytest-2.2.2: bug fixes =========================================================================== pytest-2.2.2 (updated to 2.2.3 to fix packaging issues) is a minor backward-compatible release of the versatile py.test testing tool. It contains bug fixes and a few refinements particularly to reporting with "--collectonly", see below for betails. For general information see here: http://pytest.org/ To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest Special thanks for helping on this release to Ronny Pfannschmidt and Ralf Schmitt and the contributors of issues. best, holger krekel Changes between 2.2.1 and 2.2.2 ---------------------------------------- - fix issue101: wrong args to unittest.TestCase test function now produce better output - fix issue102: report more useful errors and hints for when a test directory was renamed and some pyc/__pycache__ remain - fix issue106: allow parametrize to be applied multiple times e.g. from module, class and at function level. 
- fix issue107: actually perform session scope finalization - don't check in parametrize if indirect parameters are funcarg names - add chdir method to monkeypatch funcarg - fix crash resulting from calling monkeypatch undo a second time - fix issue115: make --collectonly robust against early failure (missing files/directories) - "-qq --collectonly" now shows only files and the number of tests in them - "-q --collectonly" now shows test ids - allow adding of attributes to test reports such that it also works with distributed testing (no upgrade of pytest-xdist needed) pytest-2.5.1/doc/en/announce/release-2.0.2.txt0000664000175000017500000000535512254002202020227 0ustar hpkhpk00000000000000py.test 2.0.2: bug fixes, improved xfail/skip expressions, speed ups =========================================================================== Welcome to pytest-2.0.2, a maintenance and bug fix release of pytest, a mature testing tool for Python, supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See the extensive docs with tested examples here: http://pytest.org/ If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest Many thanks to all issue reporters and people asking questions or complaining, particularly Jurko for his insistence, Laura, Victor and Brianna for helping with improving and Ronny for his general advise. best, holger krekel Changes between 2.0.1 and 2.0.2 ---------------------------------------------- - tackle issue32 - speed up test runs of very quick test functions by reducing the relative overhead - fix issue30 - extended xfail/skipif handling and improved reporting. If you have a syntax error in your skip/xfail expressions you now get nice error reports. 
Also you can now access module globals from xfail/skipif expressions so that this for example works now:: import pytest import mymodule @pytest.mark.skipif("mymodule.__version__[0] == "1") def test_function(): pass This will not run the test function if the module's version string does not start with a "1". Note that specifying a string instead of a boolean expressions allows py.test to report meaningful information when summarizing a test run as to what conditions lead to skipping (or xfail-ing) tests. - fix issue28 - setup_method and pytest_generate_tests work together The setup_method fixture method now gets called also for test function invocations generated from the pytest_generate_tests hook. - fix issue27 - collectonly and keyword-selection (-k) now work together Also, if you do "py.test --collectonly -q" you now get a flat list of test ids that you can use to paste to the py.test commandline in order to execute a particular test. - fix issue25 avoid reported problems with --pdb and python3.2/encodings output - fix issue23 - tmpdir argument now works on Python3.2 and WindowsXP Starting with Python3.2 os.symlink may be supported. By requiring a newer py lib version the py.path.local() implementation acknowledges this. - fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and particular thanks to Laura Creighton who also revieved parts of the documentation. 
fix slightly wrong output
Particular thanks to Floris Bruynooghe and Ronny Pfannschmidt for their great coding contributions and many others for feedback and help. best, holger krekel Changes between 2.0.0 and 2.0.1 ---------------------------------------------- - refine and unify initial capturing so that it works nicely even if the logging module is used on an early-loaded conftest.py file or plugin. - fix issue12 - show plugin versions with "--version" and "--traceconfig" and also document how to add extra information to reporting test header - fix issue17 (import-* reporting issue on python3) by requiring py>1.4.0 (1.4.1 is going to include it) - fix issue10 (numpy arrays truth checking) by refining assertion interpretation in py lib - fix issue15: make nose compatibility tests compatible with python3 (now that nose-1.0 supports python3) - remove somewhat surprising "same-conftest" detection because it ignores conftest.py when they appear in several subdirs. - improve assertions ("not in"), thanks Floris Bruynooghe - improve behaviour/warnings when running on top of "python -OO" (assertions and docstrings are turned off, leading to potential false positives) - introduce a pytest_cmdline_processargs(args) hook to allow dynamic computation of command line arguments. This fixes a regression because py.test prior to 2.0 allowed to set command line options from conftest.py files which so far pytest-2.0 only allowed from ini-files now. - fix issue7: assert failures in doctest modules. unexpected failures in doctests will not generally show nicer, i.e. within the doctest failing context. - fix issue9: setup/teardown functions for an xfail-marked test will report as xfail if they fail but report as normally passing (not xpassing) if they succeed. This only is true for "direct" setup/teardown invocations because teardown_class/ teardown_module cannot closely relate to a single test. 
- fix issue14: no logging errors at process exit - refinements to "collecting" output on non-ttys - refine internal plugin registration and --traceconfig output - introduce a mechanism to prevent/unregister plugins from the command line, see http://pytest.org/latest/plugins.html#cmdunregister - activate resultlog plugin by default - fix regression wrt yielded tests which due to the collection-before-running semantics were not setup as with pytest 1.3.4. Note, however, that the recommended and much cleaner way to do test parametrization remains the "pytest_generate_tests" mechanism, see the docs. pytest-2.5.1/doc/en/tmpdir.txt0000664000175000017500000000455312254002202015642 0ustar hpkhpk00000000000000 .. _`tmpdir handling`: .. _tmpdir: Temporary directories and files ================================================ The 'tmpdir' test function argument ----------------------------------- You can use the ``tmpdir`` function argument which will provide a temporary directory unique to the test invocation, created in the `base temporary directory`_. ``tmpdir`` is a `py.path.local`_ object which offers ``os.path`` methods and more. 
Here is an example test usage:: # content of test_tmpdir.py import os def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") p.write("content") assert p.read() == "content" assert len(tmpdir.listdir()) == 1 assert 0 Running this would result in a passed test except for the last ``assert 0`` line which we use to look at values:: $ py.test test_tmpdir.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 1 items test_tmpdir.py F ================================= FAILURES ================================= _____________________________ test_create_file _____________________________ tmpdir = local('/tmp/pytest-39/test_create_file0') def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") p.write("content") assert p.read() == "content" assert len(tmpdir.listdir()) == 1 > assert 0 E assert 0 test_tmpdir.py:7: AssertionError ========================= 1 failed in 0.01 seconds ========================= .. _`base temporary directory`: The default base temporary directory ----------------------------------------------- Temporary directories are by default created as sub-directories of the system temporary directory. The base name will be ``pytest-NUM`` where ``NUM`` will be incremented with each test run. Moreover, entries older than 3 temporary directories will be removed. You can override the default temporary directory setting like this:: py.test --basetemp=mydir When distributing tests on the local machine, ``py.test`` takes care to configure a basetemp directory for the sub processes such that all temporary data lands below a single per-test run basetemp directory. .. 
_`py.path.local`: http://py.rtfd.org/en/latest/path.html pytest-2.5.1/doc/en/customize.txt0000664000175000017500000000765012254002202016366 0ustar hpkhpk00000000000000Basic test configuration =================================== Command line options and configuration file settings ----------------------------------------------------------------- You can get help on command line options and values in INI-style configuration files by using the general help option:: py.test -h # prints options _and_ config file settings This will display command line and configuration file settings which were registered by installed plugins. .. _inifiles: How test configuration is read from configuration INI-files ------------------------------------------------------------- py.test searches for the first matching ini-style configuration file in the directories of the command line arguments and the directories above. It looks for file basenames in this order:: pytest.ini tox.ini setup.cfg Searching stops when the first ``[pytest]`` section is found in any of these files. There is no merging of configuration values from multiple files. Example:: py.test path/to/testdir will look in the following dirs for a config file:: path/to/testdir/pytest.ini path/to/testdir/tox.ini path/to/testdir/setup.cfg path/to/pytest.ini path/to/tox.ini path/to/setup.cfg ... # up until root of filesystem If no argument is provided to a py.test run, the current working directory is used to start the search. .. _`how to change command line options defaults`: .. _`adding default options`: How to change command line options defaults ------------------------------------------------ It can be tedious to type the same series of command line options every time you use py.test .
For example, if you always want to see detailed info on skipped and xfailed tests, as well as have terser "dot" progress output, you can write it into a configuration file:: # content of pytest.ini # (or tox.ini or setup.cfg) [pytest] addopts = -rsxX -q From now on, running ``py.test`` will add the specified options. Builtin configuration file options ---------------------------------------------- .. confval:: minversion Specifies a minimal pytest version required for running tests. minversion = 2.1 # will fail if we run with pytest-2.0 .. confval:: addopts Add the specified ``OPTS`` to the set of command line arguments as if they had been specified by the user. Example: if you have this ini file content:: [pytest] addopts = --maxfail=2 -rf # exit after 2 failures, report fail info issuing ``py.test test_hello.py`` actually means:: py.test --maxfail=2 -rf test_hello.py Default is to add no options. .. confval:: norecursedirs Set the directory basename patterns to avoid when recursing for test discovery. The individual (fnmatch-style) patterns are applied to the basename of a directory to decide whether to recurse into it. Pattern matching characters:: * matches everything ? matches any single character [seq] matches any character in seq [!seq] matches any char not in seq Default patterns are ``.* _darcs CVS {args}``. Setting ``norecursedirs`` replaces the default. Here is an example of how to avoid certain directories:: # content of setup.cfg [pytest] norecursedirs = .svn _build tmp* This would tell py.test to not look into typical subversion or sphinx-build directories or into any ``tmp`` prefixed directory. .. confval:: python_files One or more Glob-style file patterns determining which python files are considered as test modules. .. confval:: python_classes One or more name prefixes determining which classes are considered as test classes. ..
confval:: python_functions One or more name prefixes determining which test functions and methods are considered as test modules. Note that this has no effect on methods that live on a ``unittest.TestCase`` derived class. See :ref:`change naming conventions` for examples. pytest-2.5.1/doc/en/assert.txt0000664000175000017500000002172212254002202015641 0ustar hpkhpk00000000000000 The writing and reporting of assertions in tests ================================================== .. _`assertfeedback`: .. _`assert with the assert statement`: .. _`assert`: Asserting with the ``assert`` statement --------------------------------------------------------- ``py.test`` allows you to use the standard python ``assert`` for verifying expectations and values in Python tests. For example, you can write the following:: # content of test_assert1.py def f(): return 3 def test_function(): assert f() == 4 to assert that your function returns a certain value. If this assertion fails you will see the return value of the function call:: $ py.test test_assert1.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 1 items test_assert1.py F ================================= FAILURES ================================= ______________________________ test_function _______________________________ def test_function(): > assert f() == 4 E assert 3 == 4 E + where 3 = f() test_assert1.py:5: AssertionError ========================= 1 failed in 0.01 seconds ========================= py.test has support for showing the values of the most common subexpressions including calls, attributes, comparisons, and binary and unary operators. (See :ref:`tbreportdemo`). This allows you to use the idiomatic python constructs without boilerplate code while not losing introspection information. 
However, if you specify a message with the assertion like this:: assert a % 2 == 0, "value was odd, should be even" then no assertion introspection takes place at all and the message will be simply shown in the traceback. See :ref:`assert-details` for more information on assertion introspection. .. _`assertraises`: Assertions about expected exceptions ------------------------------------------ In order to write assertions about raised exceptions, you can use ``pytest.raises`` as a context manager like this:: import pytest with pytest.raises(ZeroDivisionError): 1 / 0 and if you need to have access to the actual exception info you may use:: with pytest.raises(RuntimeError) as excinfo: def f(): f() f() # do checks related to excinfo.type, excinfo.value, excinfo.traceback ``excinfo`` is a `py.code.ExceptionInfo`_ instance, which is a wrapper around the actual exception raised. .. _py.code.ExceptionInfo: http://pylib.readthedocs.org/en/latest/code.html#py-code-exceptioninfo If you want to write test code that works on Python 2.4 as well, you may also use two other ways to test for an expected exception:: pytest.raises(ExpectedException, func, *args, **kwargs) pytest.raises(ExpectedException, "func(*args, **kwargs)") both of which execute the specified function with args and kwargs and assert that the given ``ExpectedException`` is raised. The reporter will provide you with helpful output in case of failures such as *no exception* or *wrong exception*. .. _newreport: Making use of context-sensitive comparisons ------------------------------------------------- .. versionadded:: 2.0 py.test has rich support for providing context-sensitive information when it encounters comparisons.
For example:: # content of test_assert2.py def test_set_comparison(): set1 = set("1308") set2 = set("8035") assert set1 == set2 if you run this module:: $ py.test test_assert2.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 1 items test_assert2.py F ================================= FAILURES ================================= ___________________________ test_set_comparison ____________________________ def test_set_comparison(): set1 = set("1308") set2 = set("8035") > assert set1 == set2 E assert set(['0', '1', '3', '8']) == set(['0', '3', '5', '8']) E Extra items in the left set: E '1' E Extra items in the right set: E '5' test_assert2.py:5: AssertionError ========================= 1 failed in 0.01 seconds ========================= Special comparisons are done for a number of cases: * comparing long strings: a context diff is shown * comparing long sequences: first failing indices * comparing dicts: different entries See the :ref:`reporting demo ` for many more examples. Defining your own assertion comparison ---------------------------------------------- It is possible to add your own detailed explanations by implementing the ``pytest_assertrepr_compare`` hook. .. 
autofunction:: _pytest.hookspec.pytest_assertrepr_compare As an example consider adding the following hook in a conftest.py which provides an alternative explanation for ``Foo`` objects:: # content of conftest.py from test_foocompare import Foo def pytest_assertrepr_compare(op, left, right): if isinstance(left, Foo) and isinstance(right, Foo) and op == "==": return ['Comparing Foo instances:', ' vals: %s != %s' % (left.val, right.val)] now, given this test module:: # content of test_foocompare.py class Foo: def __init__(self, val): self.val = val def test_compare(): f1 = Foo(1) f2 = Foo(2) assert f1 == f2 you can run the test module and get the custom output defined in the conftest file:: $ py.test -q test_foocompare.py F ================================= FAILURES ================================= _______________________________ test_compare _______________________________ def test_compare(): f1 = Foo(1) f2 = Foo(2) > assert f1 == f2 E assert Comparing Foo instances: E vals: 1 != 2 test_foocompare.py:8: AssertionError 1 failed in 0.01 seconds .. _assert-details: .. _`assert introspection`: Advanced assertion introspection ---------------------------------- .. versionadded:: 2.1 Reporting details about a failing assertion is achieved either by rewriting assert statements before they are run or re-evaluating the assert expression and recording the intermediate values. Which technique is used depends on the location of the assert, py.test's configuration, and Python version being used to run py.test. Note that for assert statements with a manually provided message, i.e. ``assert expr, message``, no assertion introspection takes place and the manually provided message will be rendered in tracebacks. By default, if the Python version is greater than or equal to 2.6, py.test rewrites assert statements in test modules. Rewritten assert statements put introspection information into the assertion failure message. 
py.test only rewrites test modules directly discovered by its test collection process, so asserts in supporting modules which are not themselves test modules will not be rewritten. .. note:: py.test rewrites test modules on import. It does this by using an import hook to write a new pyc files. Most of the time this works transparently. However, if you are messing with import yourself, the import hook may interfere. If this is the case, simply use ``--assert=reinterp`` or ``--assert=plain``. Additionally, rewriting will fail silently if it cannot write new pycs, i.e. in a read-only filesystem or a zipfile. If an assert statement has not been rewritten or the Python version is less than 2.6, py.test falls back on assert reinterpretation. In assert reinterpretation, py.test walks the frame of the function containing the assert statement to discover sub-expression results of the failing assert statement. You can force py.test to always use assertion reinterpretation by passing the ``--assert=reinterp`` option. Assert reinterpretation has a caveat not present with assert rewriting: If evaluating the assert expression has side effects you may get a warning that the intermediate values could not be determined safely. A common example of this issue is an assertion which reads from a file:: assert f.read() != '...' If this assertion fails then the re-evaluation will probably succeed! This is because ``f.read()`` will return an empty string when it is called the second time during the re-evaluation. However, it is easy to rewrite the assertion and avoid any trouble:: content = f.read() assert content != '...' All assert introspection can be turned off by passing ``--assert=plain``. For further information, Benjamin Peterson wrote up `Behind the scenes of py.test's new assertion rewriting `_. .. versionadded:: 2.1 Add assert rewriting as an alternate introspection technique. .. versionchanged:: 2.1 Introduce the ``--assert`` option. 
Deprecate ``--no-assert`` and ``--nomagic``. pytest-2.5.1/doc/en/funcarg_compare.txt0000664000175000017500000002105212254002202017467 0ustar hpkhpk00000000000000 .. _`funcargcompare`: pytest-2.3: reasoning for fixture/funcarg evolution ============================================================= **Target audience**: Reading this document requires basic knowledge of python testing, xUnit setup methods and the (previous) basic pytest funcarg mechanism, see http://pytest.org/2.2.4/funcargs.html If you are new to pytest, then you can simply ignore this section and read the other sections. .. currentmodule:: _pytest Shortcomings of the previous ``pytest_funcarg__`` mechanism -------------------------------------------------------------- The pre pytest-2.3 funcarg mechanism calls a factory each time a funcarg for a test function is required. If a factory wants to re-use a resource across different scopes, it often used the ``request.cached_setup()`` helper to manage caching of resources. Here is a basic example how we could implement a per-session Database object:: # content of conftest.py class Database: def __init__(self): print ("database instance created") def destroy(self): print ("database instance destroyed") def pytest_funcarg__db(request): return request.cached_setup(setup=DataBase, teardown=lambda db: db.destroy, scope="session") There are several limitations and difficulties with this approach: 1. Scoping funcarg resource creation is not straight forward, instead one must understand the intricate cached_setup() method mechanics. 2. parametrizing the "db" resource is not straight forward: you need to apply a "parametrize" decorator or implement a :py:func:`~hookspec.pytest_generate_tests` hook calling :py:func:`~python.Metafunc.parametrize` which performs parametrization at the places where the resource is used. 
Moreover, you need to modify the factory to use an ``extrakey`` parameter containing ``request.param`` to the :py:func:`~python.Request.cached_setup` call. 3. Multiple parametrized session-scoped resources will be active at the same time, making it hard for them to affect global state of the application under test. 4. there is no way how you can make use of funcarg factories in xUnit setup methods. 5. A non-parametrized fixture function cannot use a parametrized funcarg resource if it isn't stated in the test function signature. All of these limitations are addressed with pytest-2.3 and its improved :ref:`fixture mechanism `. Direct scoping of fixture/funcarg factories -------------------------------------------------------- Instead of calling cached_setup() with a cache scope, you can use the :ref:`@pytest.fixture ` decorator and directly state the scope:: @pytest.fixture(scope="session") def db(request): # factory will only be invoked once per session - db = DataBase() request.addfinalizer(db.destroy) # destroy when session is finished return db This factory implementation does not need to call ``cached_setup()`` anymore because it will only be invoked once per session. Moreover, the ``request.addfinalizer()`` registers a finalizer according to the specified resource scope on which the factory function is operating. Direct parametrization of funcarg resource factories ---------------------------------------------------------- Previously, funcarg factories could not directly cause parametrization. You needed to specify a ``@parametrize`` decorator on your test function or implement a ``pytest_generate_tests`` hook to perform parametrization, i.e. calling a test multiple times with different value sets. pytest-2.3 introduces a decorator for use on the factory itself:: @pytest.fixture(params=["mysql", "pg"]) def db(request): ... 
# use request.param Here the factory will be invoked twice (with the respective "mysql" and "pg" values set as ``request.param`` attributes) and all of the tests requiring "db" will run twice as well. The "mysql" and "pg" values will also be used for reporting the test-invocation variants. This new way of parametrizing funcarg factories should in many cases allow you to re-use already written factories because effectively ``request.param`` was already used when test functions/classes were parametrized via :py:func:`~_pytest.python.Metafunc.parametrize(indirect=True)` calls. Of course it's perfectly fine to combine parametrization and scoping:: @pytest.fixture(scope="session", params=["mysql", "pg"]) def db(request): if request.param == "mysql": db = MySQL() elif request.param == "pg": db = PG() request.addfinalizer(db.destroy) # destroy when session is finished return db This would execute all tests requiring the per-session "db" resource twice, receiving the values created by the two respective invocations to the factory function. No ``pytest_funcarg__`` prefix when using @fixture decorator ------------------------------------------------------------------- When using the ``@fixture`` decorator the name of the function denotes the name under which the resource can be accessed as a function argument:: @pytest.fixture() def db(request): ... The name under which the funcarg resource can be requested is ``db``. You can still use the "old" non-decorator way of specifying funcarg factories aka:: def pytest_funcarg__db(request): ... But it is then not possible to define scoping and parametrization. It is thus recommended to use the factory decorator. solving per-session setup / autouse fixtures -------------------------------------------------------------- pytest for a long time offered a pytest_configure and a pytest_sessionstart hook which are often used to set up global resources. This suffers from several problems: 1.
in distributed testing the master process would setup test resources that are never needed because it only co-ordinates the test run activities of the slave processes. 2. if you only perform a collection (with "--collect-only") resource-setup will still be executed. 3. If a pytest_sessionstart is contained in some subdirectories conftest.py file, it will not be called. This stems from the fact that this hook is actually used for reporting, in particular the test-header with platform/custom information. Moreover, it was not easy to define a scoped setup from plugins or conftest files other than to implement a ``pytest_runtest_setup()`` hook and caring for scoping/caching yourself. And it's virtually impossible to do this with parametrization as ``pytest_runtest_setup()`` is called during test execution and parametrization happens at collection time. It follows that pytest_configure/session/runtest_setup are often not appropriate for implementing common fixture needs. Therefore, pytest-2.3 introduces :ref:`autouse fixtures` which fully integrate with the generic :ref:`fixture mechanism ` and obsolete many prior uses of pytest hooks. funcargs/fixture discovery now happens at collection time --------------------------------------------------------------------- pytest-2.3 takes care to discover fixture/funcarg factories at collection time. This is more efficient especially for large test suites. Moreover, a call to "py.test --collect-only" should be able to in the future show a lot of setup-information and thus presents a nice method to get an overview of fixture management in your project. .. _`compatibility notes`: .. _`funcargscompat`: Conclusion and compatibility notes --------------------------------------------------------- **funcargs** were originally introduced to pytest-2.0. 
In pytest-2.3 the mechanism was extended and refined and is now described as fixtures: * previously funcarg factories were specified with a special ``pytest_funcarg__NAME`` prefix instead of using the ``@pytest.fixture`` decorator. * Factories received a ``request`` object which managed caching through ``request.cached_setup()`` calls and allowed using other funcargs via ``request.getfuncargvalue()`` calls. These intricate APIs made it hard to do proper parametrization and implement resource caching. The new :py:func:`pytest.fixture` decorator allows you to declare the scope and let pytest figure things out for you. * if you used parametrization and funcarg factories which made use of ``request.cached_setup()`` it is recommended to invest a few minutes and simplify your fixture function code to use the :ref:`@pytest.fixture` decorator instead. This will also allow you to take advantage of the automatic per-resource grouping of tests. pytest-2.5.1/doc/en/skipping.txt0000664000175000017500000002264712254002202016173 0ustar hpkhpk00000000000000.. _`skip and xfail`: .. _skipping: Skip and xfail: dealing with tests that can not succeed ===================================================================== If you have test functions that cannot be run on certain platforms or that you expect to fail you can mark them accordingly or you may call helper functions during execution of setup or test functions. A *skip* means that you expect your test to pass unless the environment (e.g. wrong Python interpreter, missing dependency) prevents it from running. And *xfail* means that your test can run but you expect it to fail because there is an implementation problem. py.test counts and lists *skip* and *xfail* tests separately. Detailed information about skipped/xfailed tests is not shown by default to avoid cluttering the output.
You can use the ``-r`` option to see details corresponding to the "short" letters shown in the test progress:: py.test -rxs # show extra info on skips and xfails (See :ref:`how to change command line options defaults`) .. _skipif: .. _`condition booleans`: Marking a test function to be skipped ------------------------------------------- .. versionadded:: 2.0, 2.4 Here is an example of marking a test function to be skipped when run on a Python3.3 interpreter:: import sys @pytest.mark.skipif(sys.version_info >= (3,3), reason="requires python3.3") def test_function(): ... During test function setup the condition ("sys.version_info >= (3,3)") is checked. If it evaluates to True, the test function will be skipped with the specified reason. Note that pytest enforces specifying a reason in order to report meaningful "skip reasons" (e.g. when using ``-rs``). If the condition is a string, it will be evaluated as python expression. You can share skipif markers between modules. Consider this test module:: # content of test_mymodule.py import mymodule minversion = pytest.mark.skipif(mymodule.__versioninfo__ >= (1,1), reason="at least mymodule-1.1 required") @minversion def test_function(): ... You can import it from another test module:: # test_myothermodule.py from test_mymodule import minversion @minversion def test_anotherfunction(): ... For larger test suites it's usually a good idea to have one file where you define the markers which you then consistently apply throughout your test suite. Alternatively, the pre pytest-2.4 way to specify :ref:`condition strings ` instead of booleans will remain fully supported in future versions of pytest. It couldn't be easily used for importing markers between test modules so it's no longer advertised as the primary method. Skip all test functions of a class or module --------------------------------------------- As with all function :ref:`marking ` you can skip test functions at the `whole class- or module level`_. 
If your code targets python2.6 or above you use the skipif decorator (and any other marker) on classes:: @pytest.mark.skipif(sys.platform == 'win32', reason="requires windows") class TestPosixCalls: def test_function(self): "will not be setup or run under 'win32' platform" If the condition is true, this marker will produce a skip result for each of the test methods. If your code targets python2.5 where class-decorators are not available, you can set the ``pytestmark`` attribute of a class:: class TestPosixCalls: pytestmark = pytest.mark.skipif(sys.platform == 'win32', reason="requires Windows") def test_function(self): "will not be setup or run under 'win32' platform" As with the class-decorator, the ``pytestmark`` special name tells py.test to apply it to each test function in the class. If you want to skip all test functions of a module, you must use the ``pytestmark`` name on the global level:: # test_module.py pytestmark = pytest.mark.skipif(...) If multiple "skipif" decorators are applied to a test function, it will be skipped if any of the skip conditions is true. .. _`whole class- or module level`: mark.html#scoped-marking .. _xfail: Mark a test function as expected to fail ------------------------------------------------------- You can use the ``xfail`` marker to indicate that you expect the test to fail:: @pytest.mark.xfail def test_function(): ... This test will be run but no traceback will be reported when it fails. Instead terminal reporting will list it in the "expected to fail" or "unexpectedly passing" sections. By specifying on the commandline:: pytest --runxfail you can force the running and reporting of an ``xfail`` marked test as if it weren't marked at all. As with skipif_ you can also mark your expectation of a failure on a particular platform:: @pytest.mark.xfail(sys.version_info >= (3,3), reason="python3.3 api changes") def test_function(): ... 
You can furthermore prevent the running of an "xfail" test or specify a reason such as a bug ID or similar. Here is a simple test file with several usages: .. literalinclude:: example/xfail_demo.py Running it with the report-on-xfail option gives this output:: example $ py.test -rx xfail_demo.py =========================== test session starts ============================ platform linux2 -- Python 2.7.3 -- pytest-2.5.1 collected 6 items xfail_demo.py xxxxxx ========================= short test summary info ========================== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 reason: [NOTRUN] XFAIL xfail_demo.py::test_hello3 condition: hasattr(os, 'sep') XFAIL xfail_demo.py::test_hello4 bug 110 XFAIL xfail_demo.py::test_hello5 condition: pytest.__version__[0] != "17" XFAIL xfail_demo.py::test_hello6 reason: reason ======================== 6 xfailed in 0.04 seconds ========================= .. _`skip/xfail with parametrize`: Skip/xfail with parametrize --------------------------- It is possible to apply markers like skip and xfail to individual test instances when using parametrize:: import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail((1, 0)), pytest.mark.xfail(reason="some bug")((1, 3)), (2, 3), (3, 4), (4, 5), pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)), ]) def test_increment(n, expected): assert n + 1 == expected Imperative xfail from within a test or setup function ------------------------------------------------------ If you cannot declare xfail- or skipif conditions at import time you can also produce an according outcome imperatively, in test or setup code:: def test_function(): if not valid_config(): pytest.xfail("failing configuration (but should work)") # or pytest.skip("unsupported configuration") Skipping on a missing import dependency -------------------------------------------------- You can use the following import helper at module level or within a test or test
setup function:: docutils = pytest.importorskip("docutils") If ``docutils`` cannot be imported here, this will lead to a skip outcome of the test. You can also skip based on the version number of a library:: docutils = pytest.importorskip("docutils", minversion="0.3") The version will be read from the specified module's ``__version__`` attribute. .. _string conditions: specifying conditions as strings versus booleans ---------------------------------------------------------- Prior to pytest-2.4 the only way to specify skipif/xfail conditions was to use strings:: import sys @pytest.mark.skipif("sys.version_info >= (3,3)") def test_function(): ... During test function setup the skipif condition is evaluated by calling ``eval('sys.version_info >= (3,3)', namespace)``. The namespace contains all the module globals, and ``os`` and ``sys`` as a minimum. Since pytest-2.4 `condition booleans`_ are considered preferable because markers can then be freely imported between test modules. With strings you need to import not only the marker but everything used by the marker, which violates encapsulation. The reason for specifying the condition as a string was that py.test can report a summary of skip conditions based purely on the condition string. With conditions as booleans you are required to specify a ``reason`` string. Note that string conditions will remain fully supported and you are free to use them if you have no need for cross-importing markers. The evaluation of a condition string in ``pytest.mark.skipif(conditionstring)`` or ``pytest.mark.xfail(conditionstring)`` takes place in a namespace dictionary which is constructed as follows: * the namespace is initialized by putting the ``sys`` and ``os`` modules and the pytest ``config`` object into it. * updated with the module globals of the test function for which the expression is applied.
The pytest ``config`` object allows you to skip based on a test configuration value which you might have added:: @pytest.mark.skipif("not config.getvalue('db')") def test_function(...): ... The equivalent with "boolean conditions" is:: @pytest.mark.skipif(not pytest.config.getvalue("db"), reason="--db was not specified") def test_function(...): pass pytest-2.5.1/doc/ja/0000775000175000017500000000000012254002202013563 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/plugins.txt0000664000175000017500000005460212254002202016014 0ustar hpkhpk00000000000000.. _plugins: プラグイン㨠conftest ファイルã®é€£æº ==================================== .. Working with plugins and conftest files ============================================= .. py.test implements all aspects of configuration, collection, running and reporting by calling `well specified hooks`_. Virtually any Python module can be registered as a plugin. It can implement any number of hook functions (usually two or three) which all have a ``pytest_`` prefix, making hook functions easy to distinguish and find. There are three basic locations types: py.test 㯠:ref:`よã練られãŸãƒ•ック ` を呼ã³å‡ºã™ã“ã¨ã«ã‚ˆã‚Šã€è¨­å®šã€ã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ã€å®Ÿè¡Œã€ãƒ¬ãƒãƒ¼ãƒˆã®å…¨ç®‡æ‰€ã§å‡¦ç†ã‚’実装ã—ã¾ã™ã€‚事実上ã€ä»»æ„ã® Python モジュールをプラグインã¨ã—ã¦ç™»éŒ²ã§ãã¾ã™ã€‚ã“ã®ãƒ—ラグインã¯ã€ãƒ•ック関数ã®åŒºåˆ¥ã‚„検出を容易ã«ã—㦠``pytest_`` ã¨ã„ã†æŽ¥é ­è¾žã‚’ã‚‚ã¤å…¨ãƒ•ック関数ã‹ã‚‰ä»»æ„ã®ãƒ•ック関数 (通常ã¯2ã¤ã‹3ã¤) を実装ã—ã¾ã™ã€‚3ã¤ã®åŸºæœ¬çš„ãªé…置場所ãŒã‚りã¾ã™: .. * `builtin plugins`_: loaded from py.test's own ``pytest/plugin`` directory. * `external plugins`_: modules discovered through `setuptools entry points`_ * `conftest.py plugins`_: modules auto-discovered in test directories * :ref:`組ã¿è¾¼ã¿ãƒ—ラグイン `: py.test ãŒã‚‚㤠``pytest/plugin`` ディレクトリã‹ã‚‰èª­ã¿è¾¼ã‚€ * :ref:`外部プラグイン `: :ref:`setuptools ã®ã‚¨ãƒ³ãƒˆãƒªãƒ¼ãƒã‚¤ãƒ³ãƒˆ ` ã‹ã‚‰ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’検出 * :ref:`conftest.py プラグイン `: テストディレクトリã‹ã‚‰è‡ªå‹•çš„ã«ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’検出 .. 
_`pytest/plugin`: http://bitbucket.org/hpk42/pytest/src/tip/pytest/plugin/ .. _`conftest.py plugins`: .. _`conftest.py`: .. _`localplugin`: .. _`conftest`: conftest.py: ディレクトリ毎ã®ãƒ­ãƒ¼ã‚«ãƒ«ãƒ—ラグイン ----------------------------------------------- .. conftest.py: local per-directory plugins -------------------------------------------------------------- .. local ``conftest.py`` plugins contain directory-specific hook implementations. Session and test running activities will invoke all hooks defined in ``conftest.py`` files closer to the root of the filesystem. Example: Assume the following layout and content of files:: ローカル㮠``conftest.py`` プラグインã¯ã€ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªå›ºæœ‰ã®ãƒ•ック実装をå«ã¿ã¾ã™ã€‚セッションã¨ãƒ†ã‚¹ãƒˆã®å®Ÿè¡Œå‡¦ç†ã¯ã€ãƒ•ァイルシステムã®ãƒ«ãƒ¼ãƒˆãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã«è¿‘ã„ ``conftest.py`` ファイルã§å®šç¾©ã•れãŸå…¨ã¦ã®ãƒ•ックを実行ã—ã¾ã™ã€‚ファイルを次ã®å ´æ‰€ã«ç½®ãã¨ä»®å®šã—ã¦ãã ã•ã„:: a/conftest.py: def pytest_runtest_setup(item): # 'a' ディレクトリã«ã‚ã‚‹å„テストã®å®Ÿè¡Œå‘ã‘ã«å‘¼ã°ã‚Œã‚‹ print ("setting up", item) a/test_in_subdir.py: def test_sub(): pass test_flat.py: def test_flat(): pass .. Here is how you might run it:: ã“ã®ã‚³ãƒ¼ãƒ‰ã®å®Ÿè¡Œæ–¹æ³•ã§ã™:: py.test test_flat.py # "setting up" を表示ã—ãªã„ py.test a/test_sub.py # "setting up" を表示 .. Note:: .. If you have ``conftest.py`` files which do not reside in a python package directory (i.e. one containing an ``__init__.py``) then "import conftest" can be ambiguous because there might be other ``conftest.py`` files as well on your PYTHONPATH or ``sys.path``. It is thus good practise for projects to either put ``conftest.py`` under a package scope or to never import anything from a conftest.py file. 
Python パッケージディレクトリ (例ãˆã° ``__Init__.py`` ã‚’å«ã‚€ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª) ã«ç½®ã‹ã‚Œã¦ãªã„ ``conftest.py`` ファイルãŒã‚ã‚‹å ´åˆã€PYTHONPATH ã¾ãŸã¯ ``sys.path`` ã«åŒã˜åå‰ã‚’ã‚‚ã¤åˆ¥ã® ``conftest.py`` ファイルを置ãå¯èƒ½æ€§ãŒã‚りã€"import conftest" ãŒæ›–昧ã«ãªã‚‹ã¨ããŒã‚りã¾ã™ã€‚ã“ã†ã„ã£ãŸãƒ—ロジェクトã§ã¯ã€ãƒ‘ッケージスコープã®ä¸­ã§ ``conftest.py`` ã‚’ç½®ãã‹ ``conftest.py`` ファイルã‹ã‚‰æ±ºã—ã¦ã‚¤ãƒ³ãƒãƒ¼ãƒˆã—ãªã„ã‹ã®ã©ã¡ã‚‰ã‹ä¸€æ–¹ã‚’é¸æŠžã™ã‚‹ã®ãŒè‰¯ã„プラクティスã§ã™ã€‚ .. _`external plugins`: .. _`extplugins`: 外部プラグインã®ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã¨æŽ¢ç´¢ ---------------------------------- .. Installing External Plugins / Searching ------------------------------------------------------ .. Installing a plugin happens through any usual Python installation tool, for example:: プラグインã®ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã¯ã€æ™®é€šã® Python インストールツールを使ã£ã¦è¡Œã„ã¾ã™ã€‚例ãˆã°:: pip install pytest-NAME pip uninstall pytest-NAME .. If a plugin is installed, py.test automatically finds and integrates it, there is no need to activate it. Here is a list of known plugins: プラグインãŒã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«æ¸ˆã¿ãªã‚‰ã€py.test ãŒè‡ªå‹•çš„ã«æ¤œå‡ºã—ã¦ãã®ãƒ—ラグインを組ã¿è¾¼ã¿ã¾ã™ã€‚プラグインを有効化ã™ã‚‹å¿…è¦ã¯ã‚りã¾ã›ã‚“。既知ã®ãƒ—ラグイン一覧を紹介ã—ã¾ã™: .. * `pytest-capturelog `_: to capture and assert about messages from the logging module * `pytest-capturelog `_: logging モジュールã‹ã‚‰ã®ãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ã«é–¢ã™ã‚‹ã‚¢ã‚µãƒ¼ãƒˆã‚„キャプãƒãƒ£ .. * `pytest-xdist `_: to distribute tests to CPUs and remote hosts, looponfailing mode, see also :ref:`xdist` * `pytest-xdist `_: CPU やリモートホストを使ã£ãŸåˆ†æ•£ãƒ†ã‚¹ãƒˆã€ :ref:`xdist` ã‚’å‚ç…§ .. * `pytest-cov `_: coverage reporting, compatible with distributed testing * `pytest-cov `_: 分散テストã§ã®äº’æ›æ€§ã‚„ã‚«ãƒãƒ¬ãƒƒã‚¸ãƒ¬ãƒãƒ¼ãƒˆ .. * `pytest-pep8 `_: a ``--pep8`` option to enable PEP8 compliance checking. * `pytest-pep8 `_: ``--pep8`` オプションを使ã£ãŸ PEP8 è¦ç´„ãƒã‚§ãƒƒã‚¯ .. 
* `oejskit `_: a plugin to run javascript unittests in life browsers (**version 0.8.9 not compatible with pytest-2.0**) * `oejskit `_: 実際ã®ãƒ–ラウザー㧠javascript ã® unittests を実行ã™ã‚‹ãƒ—ラグイン (**ãƒãƒ¼ã‚¸ãƒ§ãƒ³ 0.8.9 㯠pytest-2.0 ã§ã¯äº’æ›æ€§ãŒã‚りã¾ã›ã‚“** ) .. You may discover more plugins through a `pytest- pypi.python.org search`_. `"pytest-" ã§ pypi.python.org を検索`_ ã™ã‚‹ã¨ã€ã‚‚ã£ã¨ãƒ—ラグインãŒè¦‹ã¤ã‹ã‚‹ã§ã—ょã†ã€‚ .. _`available installable plugins`: .. _`pytest- pypi.python.org search`: http://pypi.python.org/pypi?%3Aaction=search&term=pytest-&submit=search .. _`"pytest-" ã§ pypi.python.org を検索`: http://pypi.python.org/pypi?%3Aaction=search&term=pytest-&submit=search サンプルã«ã‚ˆã‚‹ãƒ—ラグインã®è¨˜è¿° ------------------------------ .. Writing a plugin by looking at examples ------------------------------------------------------ .. _`Distribute`: http://pypi.python.org/pypi/distribute .. _`setuptools`: http://pypi.python.org/pypi/setuptools .. If you want to write a plugin, there are many real-life examples you can copy from: 自分ã§ãƒ—ラグインを作æˆã—ãŸã„ãªã‚‰ã€ãŸãã•ã‚“ã‚る実際ã®ãƒ—ラグインをコピーã—ã¦ã‹ã‚‰å§‹ã‚ã‚‹ã¨è‰¯ã„ã§ã™: .. * a custom collection example plugin: :ref:`yaml plugin` * around 20 `builtin plugins`_ which comprise py.test's own functionality * around 10 `external plugins`_ providing additional features * カスタムコレクションã®ã‚µãƒ³ãƒ—ルプラグイン: :ref:`yaml プラグイン ` * py.test ã®ç‹¬è‡ªæ©Ÿèƒ½ã‚’æ§‹æˆã™ã‚‹ç´„20個㮠:ref:`組ã¿è¾¼ã¿ãƒ—ラグイン ` * 追加機能をæä¾›ã™ã‚‹ç´„10個㮠:ref:`外部プラグイン ` .. All of these plugins implement the documented `well specified hooks`_ to extend and add functionality. ã“れらã®å…¨ãƒ—ラグインã¯ã€æ©Ÿèƒ½ã‚’è¿½åŠ ï¼æ‹¡å¼µã™ã‚‹ãŸã‚ã«ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆä»˜ãã® :ref:`よã練られãŸãƒ•ック ` を実装ã—ã¾ã™ã€‚ .. _`setuptools entry points`: 独自プラグインを他ã‹ã‚‰ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«å¯èƒ½ã«ã™ã‚‹ -------------------------------------------- .. Making your plugin installable by others ----------------------------------------------- .. 
If you want to make your plugin externally available, you may define a so-called entry point for your distribution so that ``py.test`` finds your plugin module. Entry points are a feature that is provided by `setuptools`_ or `Distribute`_. py.test looks up the ``pytest11`` entrypoint to discover its plugins and you can thus make your plugin available by definig it in your setuptools/distribute-based setup-invocation: 自分ã§ä½œæˆã—ãŸãƒ—ラグインを外部ã‹ã‚‰åˆ©ç”¨ã§ãるよã†ã«ã—ãŸã„ãªã‚‰ã€ ``py.test`` ãŒãƒ—ラグインモジュールを見ã¤ã‘られるよã†ã«ã€ãƒ‡ã‚£ã‚¹ãƒˆãƒªãƒ“ューションã®ã„ã‚ゆるエントリーãƒã‚¤ãƒ³ãƒˆã‚’定義ã—ã¾ã™ã€‚エントリーãƒã‚¤ãƒ³ãƒˆã¯ `setuptools`_ ã¾ãŸã¯ `distribute`_ ãŒæä¾›ã™ã‚‹æ©Ÿèƒ½ã§ã™ã€‚py.test ã¯ã€ãƒ—ラグインを検出ã™ã‚‹ãŸã‚ã« ``pytest11`` ã¨ã„ã†ã‚¨ãƒ³ãƒˆãƒªãƒ¼ãƒã‚¤ãƒ³ãƒˆã‚’調ã¹ã¾ã™ã€‚ã“ã®ã‚ˆã†ã« setuptools/distribute ã® setup 処ç†ã§ã‚¨ãƒ³ãƒˆãƒªãƒ¼ãƒã‚¤ãƒ³ãƒˆã‚’定義ã™ã‚‹ã“ã¨ã«ã‚ˆã‚Šã€è‡ªåˆ†ã®ãƒ—ラグインを利用ã§ãã¾ã™ã€‚ .. sourcecode:: python # サンプル㮠./setup.py ファイル from setuptools import setup setup( name="myproject", packages = ['myproject'] # 次ã®ã‚ˆã†ã«è¨˜è¿°ã—㦠py.test ã‹ã‚‰ãƒ—ラグインを利用å¯èƒ½ã«ã™ã‚‹ entry_points = { 'pytest11': [ 'name_of_plugin = myproject.pluginmodule', ] }, ) .. If a package is installed this way, py.test will load ``myproject.pluginmodule`` as a plugin which can define `well specified hooks`_. パッケージãŒã“ã®æ–¹æ³•ã§ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã•れる場åˆã€py.test 㯠:ref:`よã練られãŸãƒ•ック ` を定義ã™ã‚‹ãƒ—ラグインã¨ã—㦠``myproject.pluginmodule`` を読ã¿è¾¼ã¿ã¾ã™ã€‚ .. Plugin discovery order at tool startup -------------------------------------------- ツール起動時ã®ãƒ—ãƒ©ã‚°ã‚¤ãƒ³æ¤œå‡ºé †åº -------------------------------- .. py.test loads plugin modules at tool startup in the following way: py.test ã¯ã€æ¬¡ã®æ–¹æ³•ã§ãƒ„ール起動時ã«ãƒ—ラグインモジュールを読ã¿è¾¼ã¿ã¾ã™ã€‚ .. * by loading all builtin plugins * å…¨ã¦ã®çµ„ã¿è¾¼ã¿ãƒ—ラグインを読ã¿è¾¼ã‚€ã€‚ .. * by loading all plugins registered through `setuptools entry points`_. * :ref:`setuptools ã®ã‚¨ãƒ³ãƒˆãƒªãƒ¼ãƒã‚¤ãƒ³ãƒˆ ` ã‹ã‚‰ç™»éŒ²ã•れãŸå…¨ã¦ã®ãƒ—ラグインを読ã¿è¾¼ã‚€ã€‚ .. 
* by pre-scanning the command line for the ``-p name`` option and loading the specified plugin before actual command line parsing. * コマンドライン㮠``-p name`` オプションを事å‰ã«èª¿ã¹ã€å®Ÿéš›ã«ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã®å¼•æ•°è§£æžã‚’行ã†å‰ã«æŒ‡å®šã—ãŸãƒ—ラグインを読ã¿è¾¼ã‚€ã€‚ .. * by loading all :file:`conftest.py` files as inferred by the command line invocation (test files and all of its *parent* directories). Note that ``conftest.py`` files from *sub* directories are by default not loaded at tool startup. * コマンドラインã®å®Ÿè¡Œã§æŽ¨å®šã•れる全ã¦ã® :file:`conftest.py` ファイルを読ã¿è¾¼ã‚€ (テストファイルã¨å…¨ã¦ã® *親* ディレクトリ) 。 *サブ* ディレクトリã‹ã‚‰ã® ``conftest.py`` ファイルã¯ã€ãƒ‡ãƒ•ォルトã§ã¯ã€ãƒ„ールã®èµ·å‹•時ã«èª­ã¿è¾¼ã¾ã‚Œãªã„ã“ã¨ã«æ³¨æ„ã—ã¦ãã ã•ã„。 .. * by recursively loading all plugins specified by the ``pytest_plugins`` variable in ``conftest.py`` files * ``conftest.py`` ファイル㮠``pytest_plugins`` å¤‰æ•°ã§æŒ‡å®šã•れãŸå…¨ã¦ã®ãƒ—ラグインをå†å¸°çš„ã«èª­ã¿è¾¼ã‚€ テストモジュールã¾ãŸã¯ conftest ファイルã®ãƒ—ラグインã®è¦æ±‚ã¨èª­ã¿è¾¼ã¿ -------------------------------------------------------------------- .. Requiring/Loading plugins in a test module or conftest file ------------------------------------------------------------- .. You can require plugins in a test module or a conftest file like this:: テストモジュールã€ã¾ãŸã¯ conftest ファイル内ã§ãƒ—ãƒ©ã‚°ã‚¤ãƒ³ã‚’è¦æ±‚ã§ãã¾ã™:: pytest_plugins = "name1", "name2", .. When the test module or conftest plugin is loaded the specified plugins will be loaded as well. You can also use dotted path like this:: テストモジュールã€ã¾ãŸã¯ conftest プラグインãŒèª­ã¿è¾¼ã¾ã‚Œã‚‹ã¨ãã€æŒ‡å®šã—ãŸãƒ—ãƒ©ã‚°ã‚¤ãƒ³ã‚‚åŒæ§˜ã«èª­ã¿è¾¼ã¾ã‚Œã¾ã™ã€‚ã•ã‚‰ã«æ¬¡ã®ã‚ˆã†ã«ãƒ‰ãƒƒãƒˆåŒºåˆ‡ã‚Šã®ãƒ‘スも使ãˆã¾ã™:: pytest_plugins = "myapp.testsupport.myplugin" .. which will import the specified module as a py.test plugin. ã“れ㯠py.test プラグインã¨ã—ã¦æŒ‡å®šã—ãŸãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’インãƒãƒ¼ãƒˆã—ã¾ã™ã€‚ .. Accessing another plugin by name -------------------------------------------- プラグインåã§åˆ¥ã®ãƒ—ラグインã¸ã‚¢ã‚¯ã‚»ã‚¹ -------------------------------------- .. 
If a plugin wants to collaborate with code from another plugin it can obtain a reference through the plugin manager like this: ã‚るプラグインã¨åˆ¥ã®ãƒ—ラグインã®ã‚³ãƒ¼ãƒ‰ã‚’å”調ã•ã›ãŸã„ãªã‚‰ã€æ¬¡ã®ã‚ˆã†ã«ãƒ—ラグインマãƒãƒ¼ã‚¸ãƒ£ãƒ¼ã‚’使ã£ã¦ãƒªãƒ•ァレンスをå–å¾—ã§ãã¾ã™: .. sourcecode:: python plugin = config.pluginmanager.getplugin("name_of_plugin") .. If you want to look at the names of existing plugins, use the ``--traceconfig`` option. 既存ã®ãƒ—ラグインåを調ã¹ãŸã„å ´åˆã¯ ``--traceconfig`` オプションを使ã£ã¦ãã ã•ã„。 .. _`findpluginname`: 有効ãªãƒ—ãƒ©ã‚°ã‚¤ãƒ³ã®æ¤œå‡º ---------------------- .. Finding out which plugins are active ---------------------------------------------------------------------------- .. If you want to find out which plugins are active in your environment you can type:: 自分ã®ç’°å¢ƒã§æœ‰åйãªãƒ—ラグインを調ã¹ãŸã„ãªã‚‰ã€æ¬¡ã®ã‚ˆã†ã«å®Ÿè¡Œã—ã¦ãã ã•ã„:: py.test --traceconfig .. and will get an extended test header which shows activated plugins and their names. It will also print local plugins aka :ref:`conftest.py ` files when they are loaded. 有効ãªãƒ—ラグインã¨ãã®åå‰ã‚’表示ã™ã‚‹æ‹¡å¼µãƒ†ã‚¹ãƒˆãƒ˜ãƒƒãƒ€ãƒ¼ã‚’å–å¾—ã—ã¾ã™ã€‚ :ref:`conftest.py ` ãŒèª­ã¿è¾¼ã¾ã‚Œã‚‹ã¨ãã«ãã®ãƒ­ãƒ¼ã‚«ãƒ«ãƒ—ラグインも表示ã—ã¾ã™ã€‚ .. _`cmdunregister`: åå‰ã‹ã‚‰ãƒ—ラグインã®ç„¡åŠ¹åŒ–ã‚„ç™»éŒ²è§£é™¤ ------------------------------------ .. Deactivating / unregistering a plugin by name ---------------------------------------------------------------------------- .. You can prevent plugins from loading or unregister them:: プラグインを読ã¿è¾¼ã¾ã›ãªã„ã€ã¾ãŸã¯ç™»éŒ²ã‚’解除ã§ãã¾ã™:: py.test -p no:NAME .. This means that any subsequent try to activate/load the named plugin will it already existing. See :ref:`findpluginname` for how to obtain the name of a plugin. ã“ã®ã‚ªãƒ—ションã¯ã€æœ‰åŠ¹åŒ–ï¼èª­ã¿è¾¼ã‚‚ã†ã¨ã™ã‚‹ãƒ—ãƒ©ã‚°ã‚¤ãƒ³ãŒæ—¢ã«å­˜åœ¨ã™ã‚‹ã‚‚ã®ã¨ã—ã¦æ‰±ã„ã¾ã™ã€‚プラグインåã‚’å–å¾—ã™ã‚‹æ–¹æ³•㯠:ref:`findpluginname` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. _`builtin plugins`: py.test ã®ãƒ‡ãƒ•ォルトã®ãƒ—ラグインリファレンス ============================================ .. 
py.test default plugin reference ==================================== .. You can find the source code for the following plugins in the `pytest repository `_. 次ã®ãƒ—ラグインã®ã‚½ãƒ¼ã‚¹ã‚³ãƒ¼ãƒ‰ãŒ `pytest リãƒã‚¸ãƒˆãƒª `_ ã«å«ã¾ã‚Œã¦ã„ã¾ã™ã€‚ .. autosummary:: _pytest.assertion _pytest.capture _pytest.config _pytest.doctest _pytest.genscript _pytest.helpconfig _pytest.junitxml _pytest.mark _pytest.monkeypatch _pytest.nose _pytest.pastebin _pytest.pdb _pytest.pytester _pytest.python _pytest.recwarn _pytest.resultlog _pytest.runner _pytest.main _pytest.skipping _pytest.terminal _pytest.tmpdir _pytest.unittest .. _`well specified hooks`: py.test ã®ãƒ•ックリファレンス ============================ .. py.test hook reference ==================================== .. Hook specification and validation ----------------------------------------- フックã®ä»•æ§˜ã¨æ¤œè¨¼ ------------------ .. py.test calls hook functions to implement initialization, running, test execution and reporting. When py.test loads a plugin it validates that each hook function conforms to its respective hook specification. Each hook function name and its argument names need to match a hook specification. However, a hook function may accept *fewer* parameters by simply not specifying them. If you mistype argument names or the hook name itself you get an error showing the available arguments. py.test ã¯ã€åˆæœŸåŒ–ã€ãƒ†ã‚¹ãƒˆå®Ÿè¡Œã€ãƒ¬ãƒãƒ¼ãƒˆã‚’実装ã™ã‚‹ãƒ•ック関数を呼ã³å‡ºã—ã¾ã™ã€‚py.test ãŒãƒ—ラグインを読ã¿è¾¼ã‚€ã¨ãã€å„フック関数åã¯ãã®å¯¾å¿œã™ã‚‹ãƒ•ック仕様を確èªã—ã¾ã™ã€‚å„フック関数åã¨ãã®å¼•æ•°ã®åå‰ã¯ã€ãƒ•ック仕様ã«ä¸€è‡´ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚但ã—ã€ãƒ•ック関数をå˜ã«æŒ‡å®šã—ãªã„ã“ã¨ã«ã‚ˆã‚Š *å°‘ãªã„* パラメーターã¯è¨±å®¹ã—ã¾ã™ã€‚引数ã®åå‰ã‚„フックåãã®ã‚‚ã®ã‚’誤入力ã—ãŸå ´åˆã€åˆ©ç”¨ã§ãる引数を表示ã™ã‚‹ã‚¨ãƒ©ãƒ¼ãŒè¡¨ç¤ºã•れã¾ã™ã€‚ .. Initialization, command line and configuration hooks -------------------------------------------------------------------- åˆæœŸåŒ–ã€ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã€è¨­å®šã®ãƒ•ック ------------------------------------ .. currentmodule:: _pytest.hookspec .. 
autofunction:: pytest_cmdline_preparse .. autofunction:: pytest_cmdline_parse .. autofunction:: pytest_namespace .. autofunction:: pytest_addoption .. autofunction:: pytest_cmdline_main .. autofunction:: pytest_configure .. autofunction:: pytest_unconfigure .. Generic "runtest" hooks ------------------------------ 汎用的㪠"runtest" フック ------------------------- .. All all runtest related hooks receive a :py:class:`pytest.Item` object. フックã«é–¢é€£ã™ã‚‹å…¨ã¦ã® runtest 㯠:py:class:`pytest.Item` オブジェクトをå—ã‘å–りã¾ã™ã€‚ .. autofunction:: pytest_runtest_protocol .. autofunction:: pytest_runtest_setup .. autofunction:: pytest_runtest_call .. autofunction:: pytest_runtest_teardown .. autofunction:: pytest_runtest_makereport .. For deeper understanding you may look at the default implementation of these hooks in :py:mod:`_pytest.runner` and maybe also in :py:mod:`_pytest.pdb` which interacts with :py:mod:`_pytest.capture` and its input/output capturing in order to immediately drop into interactive debugging when a test failure occurs. より深ãç†è§£ã™ã‚‹ã«ã¯ :py:mod:`_pytest.runner` ã®å®Ÿéš›ã®ãƒ•ックã®ãƒ‡ãƒ•ォルト実装を調ã¹ã‚‹ã“ã¨ã«ãªã‚‹ã‹ã‚‚ã—れã¾ã›ã‚“。ã•らã«ã€ãƒ†ã‚¹ãƒˆãŒå¤±æ•—ã—ãŸã¨ãã«ãã®ã¾ã¾å¯¾è©±å¼ã®ãƒ‡ãƒãƒƒã‚¬ãƒ¼ã«å…¥ã‚‹ã€ãã®å…¥å‡ºåŠ›ã®ã‚­ãƒ£ãƒ—ãƒãƒ£ã‚„ :py:mod:`_pytest.capture` ã¨ç›¸äº’ã«ã‚„りå–りã™ã‚‹ :py:mod:`_pytest.pdb` ã‚‚ãã£ã¨è¦‹ãŸããªã‚‹ã§ã—ょã†ã€‚ .. The :py:mod:`_pytest.terminal` reported specifically uses the reporting hook to print information about a test run. 実際ã«ãƒ¬ãƒãƒ¼ãƒˆã‚’行ㆠ:py:mod:`_pytest.terminal` ã¯ã€ãƒ†ã‚¹ãƒˆå®Ÿè¡Œã«é–¢ã™ã‚‹æƒ…報を表示ã™ã‚‹ãŸã‚ã«ãƒ¬ãƒãƒ¼ãƒˆãƒ•ックを使ã„ã¾ã™ã€‚ .. Collection hooks ------------------------------ コレクションã®ãƒ•ック -------------------- .. py.test calls the following hooks for collecting files and directories: py.test ã¯ãƒ•ァイルã¨ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’探索ã™ã‚‹ãŸã‚ã«æ¬¡ã®ãƒ•ックを呼ã³å‡ºã—ã¾ã™: .. autofunction:: pytest_ignore_collect .. autofunction:: pytest_collect_directory .. autofunction:: pytest_collect_file .. 
For influencing the collection of objects in Python modules you can use the following hook: Python モジュール内ã®ã‚ªãƒ–ジェクトã®ã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ã«å½±éŸ¿ã‚’与ãˆã‚‹ã«ã¯ã€æ¬¡ã®ãƒ•ックãŒä½¿ãˆã¾ã™: .. autofunction:: pytest_pycollect_makeitem .. Reporting hooks ------------------------------ レãƒãƒ¼ãƒˆãƒ•ック -------------- .. Session related reporting hooks: レãƒãƒ¼ãƒˆãƒ•ックã«é–¢é€£ã™ã‚‹ã‚»ãƒƒã‚·ãƒ§ãƒ³: .. autofunction:: pytest_collectstart .. autofunction:: pytest_itemcollected .. autofunction:: pytest_collectreport .. autofunction:: pytest_deselected .. And here is the central hook for reporting about test execution: ãã—ã¦ã€ãƒ†ã‚¹ãƒˆã®å®Ÿè¡Œã«é–¢ã™ã‚‹ä¸­å¤®ã®ãƒ•ックã§ã™: .. autofunction:: pytest_runtest_logreport .. Reference of important objects involved in hooks =========================================================== フックã§å®Ÿè¡Œã•れるé‡è¦ãªã‚ªãƒ–ジェクトã®ãƒªãƒ•ァレンス ================================================== .. autoclass:: _pytest.config.Config :members: .. autoclass:: _pytest.config.Parser :members: .. autoclass:: _pytest.main.Node(name, parent) :members: .. .. autoclass:: _pytest.main.File(fspath, parent) :members: .. autoclass:: _pytest.main.Item(name, parent) :members: .. autoclass:: _pytest.python.Module(name, parent) :members: .. autoclass:: _pytest.python.Class(name, parent) :members: .. autoclass:: _pytest.python.Function(name, parent) :members: .. autoclass:: _pytest.runner.CallInfo :members: .. autoclass:: _pytest.runner.TestReport :members: pytest-2.5.1/doc/ja/recwarn.txt0000664000175000017500000000370712254002202015774 0ustar hpkhpk00000000000000 .. Asserting deprecation and other warnings ===================================================== éžæŽ¨å¥¨ã®è­¦å‘Šã‚„ãã®ä»–ã®è­¦å‘Šã®ã‚¢ã‚µãƒ¼ãƒˆ ==================================== .. The recwarn function argument ------------------------------------ 関数ã®å¼•æ•° recwarn ------------------ .. You can use the ``recwarn`` funcarg to assert that code triggers warnings through the Python warnings system. 
Here is a simple self-contained test:: Python ã®ãƒ¯ãƒ¼ãƒ‹ãƒ³ã‚°ã‚·ã‚¹ãƒ†ãƒ ã‹ã‚‰ã®è­¦å‘Šã‚’å—ã‘å–るコードをアサートã™ã‚‹ãŸã‚ã« ``recwarn`` ã¨ã„ã†é–¢æ•°ã®å¼•æ•°ãŒä½¿ãˆã¾ã™ã€‚ç°¡å˜ãªè‡ªå·±å®Œçµåž‹ã®ãƒ†ã‚¹ãƒˆã‚’紹介ã—ã¾ã™:: # test_recwarn.py ã®å†…容 def test_hello(recwarn): from warnings import warn warn("hello", DeprecationWarning) w = recwarn.pop(DeprecationWarning) assert issubclass(w.category, DeprecationWarning) assert 'hello' in str(w.message) assert w.filename assert w.lineno .. The ``recwarn`` function argument provides these methods: 関数ã®å¼•æ•° ``recwarn`` ã¯æ¬¡ã®ãƒ¡ã‚½ãƒƒãƒ‰ã‚’æä¾›ã—ã¾ã™: .. * ``pop(category=None)``: return last warning matching the category. * ``clear()``: clear list of warnings * ``pop(category=None)``: カテゴリã«ä¸€è‡´ã™ã‚‹æœ€å¾Œã®è­¦å‘Šã‚’返㙠* ``clear()``: 警告ã®ä¸€è¦§ã‚’クリアã™ã‚‹ .. Ensuring a function triggers a deprecation warning ------------------------------------------------------- éžæŽ¨å¥¨ã®è­¦å‘Šã‚’発生ã•ã›ã‚‹é–¢æ•°ã®ç¢ºèª ---------------------------------- .. You can also call a global helper for checking that a certain function call triggers a Deprecation warning:: éžæŽ¨å¥¨ã®è­¦å‘Šã‚’発生ã•ã›ã‚‹ç‰¹å®šã®é–¢æ•°å‘¼ã³å‡ºã—を確èªã™ã‚‹ãŸã‚ã®ã‚°ãƒ­ãƒ¼ãƒãƒ«ãªãƒ˜ãƒ«ãƒ‘ー関数も呼ã³å‡ºã›ã¾ã™:: import pytest def test_global(): pytest.deprecated_call(myfunction, 17) pytest-2.5.1/doc/ja/naming20.txt0000664000175000017500000000131612254002202015740 0ustar hpkhpk00000000000000 .. _naming20: New pytest names in 2.0 (flat is better than nested) ---------------------------------------------------- If you used older version of the ``py`` distribution (which included the py.test command line tool and Python name space) you accessed helpers and possibly collection classes through the ``py.test`` Python namespaces. 
The new ``pytest`` Python module flaty provides the same objects, following these renaming rules:: py.test.XYZ -> pytest.XYZ py.test.collect.XYZ -> pytest.XYZ py.test.cmdline.main -> pytest.main The old ``py.test.*`` ways to access functionality remain valid but you are encouraged to do global renaming according to the above rules in your test code. pytest-2.5.1/doc/ja/index.txt0000664000175000017500000001237012254002202015436 0ustar hpkhpk00000000000000 .. Welcome to pytest! ============================================= Pytest ã¸ã‚ˆã†ã“ãï¼ =================== .. - **a mature full-featured testing tool** - runs on Posix/Windows, Python 2.4-3.2, PyPy and Jython-2.5.1 - :ref:`comprehensive online ` and `PDF documentation `_ - continuously `tested on many Python interpreters `_ - used in :ref:`many projects and organisations `, in test suites ranging from 10 to 10s of thousands of tests - comes with many :ref:`tested examples ` - supports :ref:`good integration practises ` - **é•·ã„間開発ã•れã€å…¨ã¦ã®æ©Ÿèƒ½ã‚’å‚™ãˆãŸãƒ†ã‚¹ãƒˆãƒ„ール** - Posix/Windows, Python 2.4-3.2, PyPy, Jython 2.5.1 ã«å¯¾å¿œ - :ref:`包括的ãªã‚ªãƒ³ãƒ©ã‚¤ãƒ³ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆ ` 㨠`PDF ドキュメント `_ - 継続的㫠`多ãã® Python インタープリターã§ãƒ†ã‚¹ãƒˆ `_ - :ref:`様々ãªãƒ—ロジェクトã¨çµ„ç¹” ` ã®ã€æ•°ä¸‡ã‚‚ã®å¹…広ã„テストスイートã§åˆ©ç”¨ - 多ãã® :ref:`テストサンプル ` ãŒä»˜å±ž - :ref:`優れãŸã‚¤ãƒ³ãƒ†ã‚°ãƒ¬ãƒ¼ã‚·ãƒ§ãƒ³ãƒ—ラクティス ` ã«å¯¾å¿œ .. 
- **provides no-boilerplate testing** - makes it :ref:`easy to get started `, - refined :ref:`usage options ` - :ref:`assert with the assert statement` - helpful :ref:`traceback and failing assertion reporting ` - allows :ref:`print debugging ` and :ref:`the capturing of standard output during test execution ` - supports :pep:`8` compliant coding styles in tests - **ボイラープレート (ã²ãªå½¢) ã®ã„らãªã„テストをæä¾›** - :ref:`ç°¡å˜ã«å§‹ã‚られる ` - æ´—ç·´ã•れ㟠:ref:`オプションã®ä½¿ã„æ–¹ ` - :ref:`assert with the assert statement` - 分ã‹ã‚Šã‚„ã™ã„ :ref:`トレースãƒãƒƒã‚¯ã¨å¤±æ•—時ã®ãƒ¬ãƒãƒ¼ãƒˆ ` - :ref:`print デãƒãƒƒã‚° ` 㨠:ref:`ãƒ†ã‚¹ãƒˆå®Ÿè¡Œæ™‚ã®æ¨™æº–出力ã®ã‚­ãƒ£ãƒ—ãƒãƒ£ ` - テストã®ã‚³ãƒ¼ãƒ‡ã‚£ãƒ³ã‚°ã‚¹ã‚¿ã‚¤ãƒ«ã‚’ :pep:`8` 準拠ã«å¯¾å¿œ .. - **supports functional testing and complex test setups** - (new in 2.2) :ref:`durations` - (much improved in 2.2) :ref:`marking and test selection ` - (improved in 2.2) :ref:`parametrized test functions ` - advanced :ref:`skip and xfail` - unique :ref:`dependency injection through funcargs ` - can :ref:`distribute tests to multiple CPUs ` through :ref:`xdist plugin ` - can :ref:`continuously re-run failing tests ` - many :ref:`builtin helpers ` - flexible :ref:`Python test discovery` - **機能テストã¨è¤‡é›‘ãªãƒ†ã‚¹ãƒˆã®ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—** - (2.2 ã®æ–°æ©Ÿèƒ½) :ref:`durations` - (2.2 ã§ã‹ãªã‚Šæ”¹å–„) :ref:`マーキングã¨ãƒ†ã‚¹ãƒˆé¸æŠž ` - (2.2 ã§æ”¹å–„) :ref:`パラメーターテスト機能 ` - :ref:`skip and xfail` - ユニーク㪠:ref:`funcargs を用ã„ãŸä¾å­˜æ€§ã®æ³¨å…¥ ` - :ref:`xdist プラグイン ` を用ã„㟠:ref:`複数 CPU ã«å¯¾ã™ã‚‹åˆ†æ•£ãƒ†ã‚¹ãƒˆ ` - :ref:`失敗ã™ã‚‹ãƒ†ã‚¹ãƒˆã®ã¿ã‚’継続的ã«å†å®Ÿè¡Œ ` - 多ãã® :ref:`組ã¿è¾¼ã¿ãƒ˜ãƒ«ãƒ‘ー機能 ` - 柔軟㪠:ref:`Python test discovery` .. 
- **integrates many common testing methods** - can integrate ``nose``, ``unittest.py`` and ``doctest.py`` style tests, including running testcases made for Django and trial - supports extended :ref:`xUnit style setup ` - supports domain-specific :ref:`non-python tests` - supports the generation of testing coverage reports - `Javascript unit- and functional testing`_ - **多ãã®å…±é€šãƒ†ã‚¹ãƒˆãƒ¡ã‚½ãƒƒãƒ‰ã‚’çµ±åˆ** - ``nose``, ``unittest.py``, ``doctest.py`` スタイルã®ãƒ†ã‚¹ãƒˆã€Django ã®ãƒ†ã‚¹ãƒˆãƒ©ãƒ³ãƒŠãƒ¼ (試作段階) - :ref:`xUnit スタイル㮠setup ` ã®æ‹¡å¼µ - ドメイン固有㮠:ref:`non-python tests` - テストカãƒãƒ¬ãƒƒã‚¸ãƒ¬ãƒãƒ¼ãƒˆã®ç”Ÿæˆ - `Javascript ã®ãƒ¦ãƒ‹ãƒƒãƒˆ/機能テスト `_ .. - **extensive plugin and customization system** - all collection, reporting, running aspects are delegated to hook functions - customizations can be per-directory, per-project or per PyPI released plugins - it is easy to add command line options or do other kind of add-ons and customizations. - **豊富ãªãƒ—ラグインã¨ã‚«ã‚¹ã‚¿ãƒžã‚¤ã‚ºã®ä»•組ã¿** - å…¨ã¦ã®ã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ã€ãƒ¬ãƒãƒ¼ãƒˆã€å®Ÿè¡ŒçŠ¶æ…‹ã‚’ãƒ•ãƒƒã‚¯é–¢æ•°ã«å§”è­² - ディレクトリå˜ä½ã€ãƒ—ロジェクトå˜ä½ã€PyPI ã§ãƒªãƒªãƒ¼ã‚¹ã•れãŸãƒ—ラグインå˜ä½ã§ã®ã‚«ã‚¹ã‚¿ãƒžã‚¤ã‚º - コマンドラインオプションを追加ã—ãŸã‚Šã€ãã®ä»–ã®ã‚¢ãƒ‰ã‚ªãƒ³ã‚„カスタマイズãŒç°¡å˜ .. _`Javascript unit- and functional testing`: http://pypi.python.org/pypi/oejskit .. _`easy`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html pytest-2.5.1/doc/ja/usage.txt0000664000175000017500000002413112254002202015431 0ustar hpkhpk00000000000000 .. _usage: 使用方法ã¨ãƒ†ã‚¹ãƒˆå®Ÿè¡Œ ==================== .. Usage and Invocations ========================================== .. _cmdline: ``python -m pytest`` ã«ã‚ˆã‚‹ pytest 呼ã³å‡ºã— ------------------------------------------- .. Calling pytest through ``python -m pytest`` ----------------------------------------------------- .. versionadded:: 2.0 .. 
If you use Python-2.5 or later you can invoke testing through the Python interpreter from the command line:: Python 2.5 ã‹ã€ãれ以上ã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’使ã£ã¦ã„ã‚‹ãªã‚‰ã€ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‹ã‚‰ Python インタープリターã§ãƒ†ã‚¹ãƒˆã‚’実行ã§ãã¾ã™:: python -m pytest [...] .. This is equivalent to invoking the command line script ``py.test [...]`` directly. ã“れã¯ç›´æŽ¥çš„ã«ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚¹ã‚¯ãƒªãƒ—ト ``py.test [...]`` を実行ã™ã‚‹ã®ã¨åŒã˜ã§ã™ã€‚ .. Getting help on version, option names, environment variables -------------------------------------------------------------- ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã€ã‚ªãƒ—ションåã€ç’°å¢ƒå¤‰æ•°ã®ãƒ˜ãƒ«ãƒ—を表示 ------------------------------------------------ :: py.test --version # pytest ãŒã‚¤ãƒ³ãƒãƒ¼ãƒˆã•れãŸå ´æ‰€ã‚’表示 py.test --fixtures # 利用ã§ãる組ã¿è¾¼ã¿ã®é–¢æ•°å¼•数を表示 py.test -h | --help # コマンドラインã¨è¨­å®šãƒ•ァイルオプションã®ãƒ˜ãƒ«ãƒ—を表示 .. Stopping after the first (or N) failures --------------------------------------------------- æœ€åˆ (ã¾ãŸã¯ N 回) 失敗ã—ãŸã¨ãã«ãƒ†ã‚¹ãƒˆã‚’中止 --------------------------------------------- .. To stop the testing process after the first (N) failures:: py.test -x # stop after first failure py.test --maxfail=2 # stop after two failures æœ€åˆ (N 回) 失敗ã—ãŸã¨ãã«ãƒ†ã‚¹ãƒˆãƒ—ロセスを中止ã™ã‚‹ã«ã¯ã€æ¬¡ã®ã‚ˆã†ã«ã—ã¾ã™:: py.test -x # 最åˆã«å¤±æ•—ã—ãŸã¨ãã«ä¸­æ­¢ py.test --maxfail=2 # 2 回失敗ã—ãŸã¨ãã«ä¸­æ­¢ .. Specifying tests / selecting tests --------------------------------------------------- ãƒ†ã‚¹ãƒˆã®æŒ‡å®šã¨é¸æŠž ------------------ .. Several test run options:: 次ã®ã‚ˆã†ãªãƒ†ã‚¹ãƒˆå®Ÿè¡Œã®ã‚ªãƒ—ションãŒã‚りã¾ã™:: py.test test_mod.py # モジュール内ã®ãƒ†ã‚¹ãƒˆã‚’実行 py.test somepath # 指定ã—ãŸãƒ‘スã®å…¨ã¦ã®ãƒ†ã‚¹ãƒˆã‚’実行 py.test -k string # string ã‚’å«ã‚€åå‰ã‚’ã‚‚ã¤ãƒ†ã‚¹ãƒˆã®ã¿ã‚’実行 .. Import 'pkg' and use its filesystem location to find and run tests:: 'pkg' をインãƒãƒ¼ãƒˆã—ã¦ã€ãã®ãƒ•ァイルシステム上ã®ä½ç½®ã‹ã‚‰ãƒ†ã‚¹ãƒˆã‚’探ã—ã¦å®Ÿè¡Œã—ã¾ã™:: py.test --pyargs pkg # pkg ã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªé…下ã«ã‚ã‚‹å…¨ã¦ã®ãƒ†ã‚¹ãƒˆã‚’実行 .. 
Modifying Python traceback printing ---------------------------------------------- Python ã®ãƒˆãƒ¬ãƒ¼ã‚¹ãƒãƒƒã‚¯è¡¨ç¤ºã‚’変更 --------------------------------- .. Examples for modifying traceback printing:: トレースãƒãƒƒã‚¯è¡¨ç¤ºã‚’変更ã™ã‚‹ä¾‹ã§ã™:: py.test --showlocals # トレースãƒãƒƒã‚¯ã®ãƒ­ãƒ¼ã‚«ãƒ«å¤‰æ•°ã‚’表示 py.test -l # トレースãƒãƒƒã‚¯ã®ãƒ­ãƒ¼ã‚«ãƒ«å¤‰æ•°ã‚’表示 (短ã„オプション) py.test --tb=long # デフォルトã®è©³ç´°ãªãƒˆãƒ¬ãƒ¼ã‚¹ãƒãƒƒã‚¯å½¢å¼ py.test --tb=native # Python 標準ライブラリã®å½¢å¼ py.test --tb=short # 短ã„トレースãƒãƒƒã‚¯å½¢å¼ py.test --tb=line # 失敗ã—ãŸãƒ†ã‚¹ãƒˆã‚’1行表示 .. Dropping to PDB (Python Debugger) on failures ---------------------------------------------- 失敗ã—ãŸã¨ãã« PDB (Python デãƒãƒƒã‚¬ãƒ¼) ã‚’èµ·å‹• --------------------------------------------- .. _PDB: http://docs.python.org/library/pdb.html .. Python comes with a builtin Python debugger called PDB_. ``py.test`` allows one to drop into the PDB prompt via a command line option:: Python ã«ã¯ PDB_ ã¨ã„ã†çµ„ã¿è¾¼ã¿ã® Python デãƒãƒƒã‚¬ãƒ¼ãŒä»˜å±žã—ã¦ã„ã¾ã™ã€‚ ``py.test`` ã¯ã€ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ション㧠PDB プロンプトを起動ã§ãã¾ã™:: py.test --pdb .. This will invoke the Python debugger on every failure. Often you might only want to do this for the first failing test to understand a certain failure situation:: ã“ã®ã‚ªãƒ—ションã¯ã€ãƒ†ã‚¹ãƒˆãŒå¤±æ•—ã—ãŸã¨ãã« Python デãƒãƒƒã‚¬ãƒ¼ã‚’èµ·å‹•ã—ã¾ã™ã€‚多ãã®å ´åˆã€ç‰¹å®šã®ã‚¨ãƒ©ãƒ¼çжæ³ã‚’把æ¡ã™ã‚‹ã®ã«æœ€åˆã«å¤±æ•—ã—ãŸã¨ãã®ã¿ãƒ‡ãƒãƒƒã‚¬ãƒ¼ã‚’èµ·å‹•ã—ãŸã„ã¯ãšã§ã™:: py.test -x --pdb # 最åˆã«ãƒ†ã‚¹ãƒˆãŒå¤±æ•—ã—ãŸã¨ãã« PDB ã‚’èµ·å‹•ã—ã¦ãƒ†ã‚¹ãƒˆã‚»ãƒƒã‚·ãƒ§ãƒ³ã‚’終了 py.test --pdb --maxfail=3 # 最åˆã® 3 回ã®å¤±æ•—ã«å¯¾ã—㦠PDB ã‚’èµ·å‹• .. Setting a breakpoint / aka ``set_trace()`` ---------------------------------------------------- ``set_trace()`` ã¨ã„ã†ãƒ–レークãƒã‚¤ãƒ³ãƒˆã®è¨­å®š -------------------------------------------- .. 
If you want to set a breakpoint and enter the ``pdb.set_trace()`` you can use a helper:: ブレークãƒã‚¤ãƒ³ãƒˆã‚’設定ã—㦠``pdb.set_trace()`` を行ã„ãŸã„ãªã‚‰ã€ãƒ˜ãƒ«ãƒ‘ー関数を使ãˆã¾ã™:: import pytest def test_function(): ... pytest.set_trace() # PDB デãƒãƒƒã‚¬ãƒ¼ã‚’èµ·å‹•ã—ã¦ãƒˆãƒ¬ãƒ¼ã‚¹ã™ã‚‹ .. versionadded: 2.0.0 .. In previous versions you could only enter PDB tracing if you disabled capturing on the command line via ``py.test -s``. 以å‰ã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã§ã¯ã€ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‹ã‚‰ ``py.test -s`` ã§æ¨™æº–出力ã®å–得を無効ã«ã—ãŸå ´åˆã®ã¿ã€PDB トレースãŒå¯èƒ½ã§ã—ãŸã€‚ .. _durations: テストã®å®Ÿè¡Œæ™‚間をプロファイリング ---------------------------------- .. Profiling test execution duration ------------------------------------- .. versionadded: 2.2 .. To get a list of the slowest 10 test durations:: 最もé…ã„ 10 個ã®ãƒ†ã‚¹ãƒˆä¸€è¦§ã‚’å–å¾—ã™ã‚‹ã«ã¯ã€æ¬¡ã®ã‚ˆã†ã«ã—ã¾ã™:: py.test --durations=10 .. Creating JUnitXML format files ---------------------------------------------------- JUnitXML å½¢å¼ã®ãƒ•ã‚¡ã‚¤ãƒ«ä½œæˆ --------------------------- .. To create result files which can be read by Hudson_ or other Continuous integration servers, use this invocation:: Hudson_ ã‚„ãã®ä»–ã®ç¶™ç¶šçš„インテグレーションサーãƒãƒ¼ã§èª­ã¿è¾¼ã‚ã‚‹çµæžœãƒ•ァイルを作æˆã™ã‚‹ã«ã¯ã€æ¬¡ã®ã‚ˆã†ã«å®Ÿè¡Œã—ã¾ã™:: py.test --junitxml=path .. to create an XML file at ``path``. ``path`` ã« XML ファイルãŒä½œæˆã•れã¾ã™ã€‚ .. Creating resultlog format files ---------------------------------------------------- resultlog å½¢å¼ã®ãƒ•ã‚¡ã‚¤ãƒ«ä½œæˆ ---------------------------- .. To create plain-text machine-readable result files you can issue:: コンピューターãŒèª­ã‚る平文ã®çµæžœãƒ•ァイルを作æˆã™ã‚‹ã«ã¯ã€æ¬¡ã®ã‚ˆã†ã«ã—ã¾ã™:: py.test --resultlog=path .. and look at the content at the ``path`` location. Such files are used e.g. by the `PyPy-test`_ web page to show test results over several revisions. ``path`` ã«ä½œæˆã•れãŸãƒ•ァイルãŒã‚りã¾ã™ã€‚ãã†ã„ã£ãŸãƒ•ァイルã¯ã€ä¾‹ãˆã° `PyPy-test`_ ã§è¤‡æ•°ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã§ã®ãƒ†ã‚¹ãƒˆçµæžœã‚’表示ã™ã‚‹ã®ã«ä½¿ã‚れã¾ã™ã€‚ .. 
_`PyPy-test`: http://codespeak.net:8099/summary .. Sending test report to pocoo pastebin service ----------------------------------------------------- pocoo pastbin サービスã«ãƒ†ã‚¹ãƒˆçµæžœã‚’投稿 ---------------------------------------- .. **Creating a URL for each test failure**:: **テストãŒå¤±æ•—ã™ã‚‹æ¯Žã« URL を作æˆã—ã¾ã™**:: py.test --pastebin=failed .. This will submit test run information to a remote Paste service and provide a URL for each failure. You may select tests as usual or add for example ``-x`` if you only want to send one particular failure. ã“れã¯ãƒªãƒ¢ãƒ¼ãƒˆã® Paste サービスã¸ãƒ†ã‚¹ãƒˆã®å®Ÿè¡Œæƒ…報を投稿ã—ã¦ã€å¤±æ•—ã—ãŸãƒ†ã‚¹ãƒˆæ¯Žã« URL ã‚’æä¾›ã—ã¾ã™ã€‚特定ã®ã‚¨ãƒ©ãƒ¼ã®ã¿ã‚’投稿ã—ãŸã„å ´åˆã€æ™®é€šã«ãƒ†ã‚¹ãƒˆã‚’é¸æŠžã™ã‚‹ã‹ã€ä¾‹ãˆã° ``-x`` を追加ã—ã¾ã™ã€‚ .. **Creating a URL for a whole test session log**:: **å…¨ã¦ã®ãƒ†ã‚¹ãƒˆã‚»ãƒƒã‚·ãƒ§ãƒ³ãƒ­ã‚°ã«å¯¾ã—ã¦1ã¤ã® URL を作æˆã—ã¾ã™**:: py.test --pastebin=all .. Currently only pasting to the http://paste.pocoo.org service is implemented. ã„ã¾ã®ã¨ã“ã‚ã¯ã€http://paste.pocoo.org サービスã¸ã®ãƒšãƒ¼ã‚¹ãƒˆã®ã¿ãŒå®Ÿè£…ã•れã¦ã„ã¾ã™ã€‚ .. Calling pytest from Python code ---------------------------------------------------- Python コードã‹ã‚‰ã® pytest 呼ã³å‡ºã— ----------------------------------- .. versionadded:: 2.0 .. You can invoke ``py.test`` from Python code directly:: Python コードã‹ã‚‰ç›´æŽ¥ ``py.test`` を呼ã³å‡ºã›ã¾ã™:: pytest.main() .. this acts as if you would call "py.test" from the command line. It will not raise ``SystemExit`` but return the exitcode instead. You can pass in options and arguments:: ã“れã¯ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‹ã‚‰ "py.test" を呼ã³å‡ºã™ã‚ˆã†ã«å‹•作ã—ã¾ã™ã€‚ ``SystemExit`` を発生ã•ã›ãªã„代ã‚りã«çµ‚了コードを返ã—ã¾ã™ã€‚次ã®ã‚ˆã†ã«ã‚ªãƒ—ションã¨å¼•数を渡ã—ã¾ã™:: pytest.main(['-x', 'mytestdir']) .. or pass in a string:: ã¾ãŸã¯ã€æ–‡å­—åˆ—ã§æ¸¡ã—ã¾ã™:: pytest.main("-x mytestdir") .. 
You can specify additional plugins to ``pytest.main``:: ``pytest.main`` ã«è¿½åŠ ã®ãƒ—ラグインを指定ã§ãã¾ã™:: # myinvoke.py ã®å†…容 import pytest class MyPlugin: def pytest_sessionfinish(self): print("*** test run reporting finishing") pytest.main("-qq", plugins=[MyPlugin()]) .. Running it will show that ``MyPlugin`` was added and its hook was invoked:: ã“ã®ã‚³ãƒ¼ãƒ‰ã‚’実行ã™ã‚‹ã¨ ``MyPlugin`` ãŒè¿½åŠ ã•れã€ãã®ãƒ•ックãŒå®Ÿè¡Œã•れãŸã“ã¨ã‚’表示ã—ã¾ã™:: $ python myinvoke.py collecting ... collected 0 items in 0.00 seconds *** test run reporting finishing .. include:: links.inc pytest-2.5.1/doc/ja/goodpractises.txt0000664000175000017500000003321412254002202017175 0ustar hpkhpk00000000000000 .. highlightlang:: python .. _`goodpractises`: 優れãŸã‚¤ãƒ³ãƒ†ã‚°ãƒ¬ãƒ¼ã‚·ãƒ§ãƒ³ãƒ—ラクティス ==================================== .. Good Integration Practises ================================================= .. Work with virtual environments ----------------------------------------------------------- 仮想環境ã§ã®ä½œæ¥­ ---------------- .. We recommend to use virtualenv_ environments and use easy_install_ (or pip_) for installing your application dependencies as well as the ``pytest`` package itself. This way you will get a much more reproducible environment. A good tool to help you automate test runs against multiple dependency configurations or Python interpreters is `tox`_. virtualenv_ 環境を構築ã—ã¦ã€ ``pytest`` パッケージã¨ãã®ä»–ã«ä¾å­˜ã™ã‚‹ã‚¢ãƒ—リケーションをインストールã™ã‚‹ã®ã« easy_install_ (ã¾ãŸã¯ pip_) を使ã†ã“ã¨ã‚’ãŠå¥¨ã‚ã—ã¾ã™ã€‚ `tox`_ ã¨ã„ã†ã€è¤‡æ•°ã®ä¾å­˜è¨­å®šã‚„ Python インタープリターã«å¯¾ã—ã¦è‡ªå‹•çš„ã«ãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹ä¾¿åˆ©ãªãƒ„ールãŒã‚りã¾ã™ã€‚ .. _`virtualenv`: http://pypi.python.org/pypi/virtualenv .. _`buildout`: http://www.buildout.org/ .. _pip: http://pypi.python.org/pypi/pip .. Use tox and Continuous Integration servers ------------------------------------------------- tox ã¨ç¶™ç¶šçš„インテグレーションサーãƒãƒ¼ã®åˆ©ç”¨ -------------------------------------------- .. 
If you frequently release code to the public you may want to look into `tox`_, the virtualenv test automation tool and its `pytest support `_. The basic idea is to generate a JUnitXML file through the ``--junitxml=PATH`` option and have a continuous integration server like Jenkins_ pick it up and generate reports. ã‚‚ã—é »ç¹ã«ã‚³ãƒ¼ãƒ‰ã‚’一般å‘ã‘ã«ãƒªãƒªãƒ¼ã‚¹ã™ã‚‹ãªã‚‰ã€virtualenv ã®ãƒ†ã‚¹ãƒˆè‡ªå‹•化ã¨ãã® `pytest サãƒãƒ¼ãƒˆ `_ を行ㆠ`tox`_ を調ã¹ã¦ã¿ãŸããªã‚‹ã§ã—ょã†ã€‚基本的ãªè€ƒãˆæ–¹ã¯ã€ ``--junitxml=PATH`` オプションã«ã‚ˆã‚Š JUnitXML ファイルを生æˆã—ã¾ã™ã€‚ãã—㦠Jenkins_ ã®ã‚ˆã†ãªç¶™ç¶šçš„インテグレーションサーãƒãƒ¼ãŒãã®ãƒ•ァイルをå–å¾—ã—ã¦ãƒ¬ãƒãƒ¼ãƒˆã‚’生æˆã—ã¾ã™ã€‚ .. _standalone: .. _`genscript method`: å˜ç‹¬å®Ÿè¡Œã§ãã‚‹ py.test スクリプトã®ä½œæˆ --------------------------------------- .. Create a py.test standalone script ------------------------------------------- .. If you are a maintainer or application developer and want others to easily run tests you can generate a completely standalone "py.test" script:: ã‚ãªãŸãŒãƒ¡ãƒ³ãƒ†ãƒŠãƒ¼ã¾ãŸã¯ã‚¢ãƒ—リケーション開発者ã§ã€ä»–ã®äººã«ã‚‚ç°¡å˜ã«ãƒ†ã‚¹ãƒˆã‚’実行ã•ã›ãŸã„ãªã‚‰ã€å˜ç‹¬ã§å®Ÿè¡Œã§ãã‚‹ "py.test" スクリプトを作æˆã§ãã¾ã™:: py.test --genscript=runtests.py .. generates a ``runtests.py`` script which is a fully functional basic ``py.test`` script, running unchanged under Python2 and Python3. You can tell people to download the script and then e.g. run it like this:: 基本的㫠``py.test`` スクリプトã¨å®Œå…¨ã«åŒæ©Ÿèƒ½ã‚’も㤠``runtests.py`` スクリプトを生æˆã—ã¾ã™ã€‚ã“ã®ã‚¹ã‚¯ãƒªãƒ—ト㯠Python2 㨠Python3 ã«ãŠã„ã¦ã‚‚修正ã›ãšå®Ÿè¡Œã§ãã¾ã™ã€‚ã“ã®ã‚¹ã‚¯ãƒªãƒ—トをダウンロードã—ã¦ã€ä¾‹ãˆã°ã€æ¬¡ã®ã‚ˆã†ã«å®Ÿè¡Œã—ã¦ãã ã•ã„ã¨ä¼ãˆã‚Œã°è‰¯ã„ã§ã™:: python runtests.py .. _`Distribute for installation`: http://pypi.python.org/pypi/distribute#installation-instructions .. _`distribute installation`: http://pypi.python.org/pypi/distribute ``python setup.py test`` ã«ã‚ˆã‚‹ distutils ã¨ã®é€£æº -------------------------------------------------- .. 
Integrating with distutils / ``python setup.py test`` -------------------------------------------------------- .. You can integrate test runs into your distutils or setuptools based project. Use the `genscript method`_ to generate a standalone py.test script:: プロジェクトベース㮠distutils ã¾ãŸã¯ setuptools ã§ãƒ†ã‚¹ãƒˆå®Ÿè¡Œã‚’連æºã§ãã¾ã™ã€‚å˜ç‹¬ã§å®Ÿè¡Œã§ãã‚‹ py.test スクリプトを生æˆã™ã‚‹ã«ã¯ :ref:`genscript メソッド ` を使ã£ã¦ãã ã•ã„:: py.test --genscript=runtests.py .. and make this script part of your distribution and then add this to your ``setup.py`` file:: ã“ã®ã‚¹ã‚¯ãƒªãƒ—トをé…布物ã®ä¸€éƒ¨ã«ã—㦠``setup.py`` ãƒ•ã‚¡ã‚¤ãƒ«ã«æ¬¡ã®ã‚³ãƒ¼ãƒ‰ã‚’追加ã—ã¾ã™:: from distutils.core import setup, Command # setuptools ã‹ã‚‰ã‚‚インãƒãƒ¼ãƒˆã§ãã¾ã™ class PyTest(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): import sys,subprocess errno = subprocess.call([sys.executable, 'runtest.py']) raise SystemExit(errno) setup( #..., cmdclass = {'test': PyTest}, #..., ) .. If you now type:: ã“ã“ã§æ¬¡ã®ã‚ˆã†ã«å®Ÿè¡Œã—ã¾ã™:: python setup.py test .. this will execute your tests using ``runtest.py``. As this is a standalone version of ``py.test`` no prior installation whatsoever is required for calling the test command. You can also pass additional arguments to the subprocess-calls such as your test directory or other options. ã“れ㯠``runtest.py`` を使ã£ã¦ãƒ†ã‚¹ãƒˆã‚’実行ã—ã¾ã™ã€‚ã“ã®ã‚ˆã†ã«ã€å˜ç‹¬ã§å®Ÿè¡Œã§ãã‚‹ ``py.test`` スクリプトã¯ã€ãã®ãƒ†ã‚¹ãƒˆã‚³ãƒžãƒ³ãƒ‰ã‚’呼ã³å‡ºã™ãŸã‚ã«ä¾å­˜ãƒ‘ッケージをインストールã™ã‚‹å¿…è¦ãŒã‚りã¾ã›ã‚“。ã•らã«ãƒ†ã‚¹ãƒˆãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚„ãã®ä»–ã®ã‚ªãƒ—ションãªã©ã€subprocess.call ã«è¿½åŠ ã®å¼•æ•°ã¨ã—ã¦æ¸¡ã›ã¾ã™ã€‚ .. _`test discovery`: .. _`Python test discovery`: setuptools/distribute ã®ãƒ†ã‚¹ãƒˆã‚³ãƒžãƒ³ãƒ‰ã¨ã®çµ„ã¿åˆã‚ã› ---------------------------------------------------- .. Integration with setuptools/distribute test commands ---------------------------------------------------- .. 
Distribute/Setuptools support test requirements, which means it's really easy to extend its test command to support running a pytest from test requirements:: setuptools/distribute ã¯ã€ãƒ†ã‚¹ãƒˆã«å¿…è¦ãªãƒ‘ッケージè¦ä»¶ã‹ã‚‰ pytest を実行ã™ã‚‹ãƒ†ã‚¹ãƒˆã‚³ãƒžãƒ³ãƒ‰ã‚’ã¨ã¦ã‚‚ç°¡å˜ã«æ‹¡å¼µã§ãã‚‹ tests_require ã«å¯¾å¿œã—ã¦ã„ã¾ã™:: from setuptools.command.test import test as TestCommand class PyTest(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): # 外部㧠egg を読ã¿è¾¼ã¾ã›ãŸããªã„ãªã‚‰ã“ã“ã§ã‚¤ãƒ³ãƒãƒ¼ãƒˆã—ã¦ãã ã•ã„ import pytest pytest.main(self.test_args) setup( #..., tests_require=['pytest'], cmdclass = {'test': PyTest}, ) .. Now if you run:: ã“ã“ã§æ¬¡ã®ã‚ˆã†ã«å®Ÿè¡Œã—ã¾ã™:: python setup.py test .. this will download py.test if needed and then run py.test as you would expect it to. å¿…è¦ã«å¿œã˜ã¦ py.test をダウンロードã—ã¦ã‹ã‚‰ã€æœŸå¾…ã—ãŸé€šã‚Šã« py.test を実行ã—ã¾ã™ã€‚ .. Conventions for Python test discovery ------------------------------------------------- Python テスト探索ã®è¦ç´„ ----------------------- .. ``py.test`` implements the following standard test discovery: ``py.test`` ã¯æ¬¡ã®ãƒ†ã‚¹ãƒˆæŽ¢ç´¢æ¨™æº–を実装ã—ã¾ã™: .. * collection starts from the initial command line arguments which may be directories, filenames or test ids. * recurse into directories, unless they match :confval:`norecursedirs` * ``test_*.py`` or ``*_test.py`` files, imported by their `package name`_. 
* ``Test`` prefixed test classes (without an ``__init__`` method) * ``test_`` prefixed test functions or methods are test items * コレクションã¯ã€ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã€ãƒ•ァイルåã€ãƒ†ã‚¹ãƒˆ ID ã¨ã„ã£ãŸæœ€åˆã«ä¸ŽãˆãŸã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³å¼•æ•°ã‹ã‚‰é–‹å§‹ã™ã‚‹ * :confval:`norecursedirs` ã«ä¸€è‡´ã—ãªã„é™ã‚Šã€ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’å†å¸°çš„ã«æŽ¢ç´¢ã™ã‚‹ * `package name`_ ã§ã‚¤ãƒ³ãƒãƒ¼ãƒˆã•れる ``test_*.py`` ã¾ãŸã¯ ``*_test.py`` ファイル * ``Test`` ã¨ã„ã†æŽ¥é ­è¾žã‚’ã‚‚ã¤ãƒ†ã‚¹ãƒˆã‚¯ãƒ©ã‚¹ (``__init__`` メソッドをもãŸãªã„) * ``test_`` ã¨ã„ã†æŽ¥é ­è¾žã‚’ã‚‚ã¤ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚„メソッドãŒãƒ†ã‚¹ãƒˆé …ç›®ã«ãªã‚‹ .. For examples of how to customize your test discovery :doc:`example/pythoncollection`. テスト探索をカスタマイズã™ã‚‹æ–¹æ³•ã®ä¾‹ã¯ :doc:`example/pythoncollection` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. Within Python modules, py.test also discovers tests using the standard :ref:`unittest.TestCase ` subclassing technique. Python モジュール内ã§ã¯ã€py.test も標準ライブラリ㮠:ref:`unittest.TestCase ` ã®ã‚µãƒ–クラス化を使ã£ã¦ãƒ†ã‚¹ãƒˆã‚’探索ã—ã¾ã™ã€‚ .. Choosing a test layout / import rules ------------------------------------------ ãƒ†ã‚¹ãƒˆãƒ¬ã‚¤ã‚¢ã‚¦ãƒˆé¸æŠžã¨ã‚¤ãƒ³ãƒãƒ¼ãƒˆãƒ«ãƒ¼ãƒ« -------------------------------------- .. py.test supports common test layouts: py.test ã¯ä¸€èˆ¬çš„ãªãƒ†ã‚¹ãƒˆãƒ¬ã‚¤ã‚¢ã‚¦ãƒˆã«å¯¾å¿œã—ã¦ã„ã¾ã™: .. * inlining test directories into your application package, useful if you want to keep (unit) tests and actually tested code close together:: * アプリケーション内㫠test ディレクトリをé…ç½®ã—ã¦ã„ã¾ã™ã€‚(ユニット) ãƒ†ã‚¹ãƒˆã‚’ä¿æŒã—ã¦å®Ÿéš›ã«ãƒ†ã‚¹ãƒˆã•れãŸã‚³ãƒ¼ãƒ‰ã‚’一緒ã«ã—ã¦ãŠãã®ã«å½¹ç«‹ã¡ã¾ã™:: mypkg/ __init__.py appmodule.py ... test/ test_app.py ... .. * putting tests into an extra directory outside your actual application code, useful if you have many functional tests or want to keep tests separate from actual application code:: * テストをアプリケーションコードã®å¤–部ã«é…ç½®ã—ã¦ã„ã¾ã™ã€‚多ãã®æ©Ÿèƒ½ãƒ†ã‚¹ãƒˆãŒã‚ã‚‹ã€ã¾ãŸã¯å®Ÿéš›ã®ã‚¢ãƒ—リケーションコードã‹ã‚‰ãƒ†ã‚¹ãƒˆã‚’分離ã—ã¦ä¿æŒã—ãŸã„ã¨ãã«å½¹ç«‹ã¡ã¾ã™:: mypkg/ __init__.py appmodule.py tests/ test_app.py ... .. 
In both cases you usually need to make sure that ``mypkg`` is importable, for example by using the setuptools ``python setup.py develop`` method. ã©ã¡ã‚‰ã®å ´åˆã‚‚ã€æ™®é€šã« ``mypkg`` ãŒã‚¤ãƒ³ãƒãƒ¼ãƒˆã§ãã‚‹ã“ã¨ã‚’ä¿è¨¼ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚例ãˆã°ã€setuptools ã® ``python setup.py develop`` メソッドを使ã„ã¾ã™ã€‚ .. You can run your tests by pointing to it:: 次ã®ã‚ˆã†ã«ãƒ†ã‚¹ãƒˆã‚’実行ã§ãã¾ã™:: py.test tests/test_app.py # 外部ã®ãƒ†ã‚¹ãƒˆãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª py.test mypkg/test/test_app.py # 内部ã®ãƒ†ã‚¹ãƒˆãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª py.test mypkg # テストディレクトリé…下ã«ã‚ã‚‹å…¨ã¦ã®ãƒ†ã‚¹ãƒˆã‚’実行 py.test # カレントディテクリé…下ã«ã‚ã‚‹å…¨ã¦ã®ãƒ†ã‚¹ãƒˆã‚’実行 ... .. _`package name`: .. note:: py.test ãŒãƒ•ァイルシステムをå†å¸°çš„ã«è¾¿ã£ã¦ "a/b/test_module.py" テストファイルを検出ã™ã‚‹å ´åˆã€ã‚¤ãƒ³ãƒãƒ¼ãƒˆåを次ã®ã‚ˆã†ã«ã—ã¦æ±ºå®šã—ã¾ã™ã€‚ * ``basedir`` を検出ã™ã‚‹ -- ã“れ㯠``__init__.py`` ã‚’å«ã¾ãªã„最åˆã® "upward" (ルートã«å‘ã‹ã†) ディレクトリã§ã™ã€‚ ``a`` 㨠``b`` ã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªä¸¡æ–¹ã« ``__init__.py`` ã‚’å«ã‚€å ´åˆã€basedir 㯠``a`` ã®è¦ªãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã«ãªã‚Šã¾ã™ * テストモジュールを完全修飾インãƒãƒ¼ãƒˆåã§ã‚¤ãƒ³ãƒãƒ¼ãƒˆã§ãるよã†ã«ã™ã‚‹ãŸã‚ã« ``sys.path.insert(0, basedir)`` を実行ã—ã¾ã™ * パス区切り文字 ``/`` ã‚’ "." ã«å¤‰æ›ã™ã‚‹ã“ã¨ã§æ±ºã¾ã‚‹ ``import a.b.test_module`` を行ã†ã€ã¤ã¾ã‚Šã‚¤ãƒ³ãƒãƒ¼ãƒˆåã«ç›´æŽ¥ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚„ファイルåを対応付ã‘ã‚‹è¦ç´„ã«å¾“ã‚ãªã„ã¨ã„ã‘ã¾ã›ã‚“ ã“ã®å°‘ã—進化ã—ãŸã‚¤ãƒ³ãƒãƒ¼ãƒˆãƒ†ã‚¯ãƒ‹ãƒƒã‚¯ã‚’使ã†ç†ç”±ã¯ã€å·¨å¤§ãªãƒ—ロジェクトã§ã¯è¤‡æ•°ã®ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ãŒãŠäº’ã„ã«ã‚¤ãƒ³ãƒãƒ¼ãƒˆã™ã‚‹å¯èƒ½æ€§ãŒã‚ã‚‹ã‹ã‚‰ã§ã™ã€‚ãã—ã¦ã€ã“ã®ã‚ˆã†ã«å°Žå‡ºã•れãŸã‚¤ãƒ³ãƒãƒ¼ãƒˆåã®æ¨™æº–化ã¯ã€ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’2回インãƒãƒ¼ãƒˆã—ã¦ã—ã¾ã£ã¦é©šã‹ãªã„よã†ã«ã™ã‚‹ã®ã«å½¹ç«‹ã¡ã¾ã™ã€‚ .. If py.test finds a "a/b/test_module.py" test file while recursing into the filesystem it determines the import name as follows: * find ``basedir`` -- this is the first "upward" (towards the root) directory not containing an ``__init__.py``. If both the ``a`` and ``b`` directories contain an ``__init__.py`` the basedir will be the parent dir of ``a``. 
* perform ``sys.path.insert(0, basedir)`` to make the test module importable under the fully qualified import name. * ``import a.b.test_module`` where the path is determined by converting path separators ``/`` into "." characters. This means you must follow the convention of having directory and file names map directly to the import names. The reason for this somewhat evolved importing technique is that in larger projects multiple test modules might import from each other and thus deriving a canonical import name helps to avoid surprises such as a test module getting imported twice. .. include:: links.inc pytest-2.5.1/doc/ja/test/0000775000175000017500000000000012254002202014542 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/test/attic.txt0000664000175000017500000001011512254002202016405 0ustar hpkhpk00000000000000=============================================== ATTIC documentation =============================================== XXX REVIEW and remove the below XXX Customizing the testing process =============================== writing conftest.py files ----------------------------------- You may put conftest.py files containing project-specific configuration in your project's root directory, it's usually best to put it just into the same directory level as your topmost ``__init__.py``. In fact, ``py.test`` performs an "upwards" search starting from the directory that you specify to be tested and will lookup configuration values right-to-left. You may have options that reside e.g. in your home directory but note that project specific settings will be considered first. There is a flag that helps you debug your conftest.py configurations:: py.test --traceconfig customizing the collecting and running process ----------------------------------------------- To introduce different test items you can create one or more ``conftest.py`` files in your project. 
When the collection process traverses directories and modules the default collectors will produce custom Collectors and Items if they are found in a local ``conftest.py`` file. Customizing the collection process in a module ---------------------------------------------- If you have a module where you want to take responsibility for collecting your own test Items and possibly even for executing a test then you can provide `generative tests`_ that yield callables and possibly arguments as a tuple. This is especially useful for calling application test machinery with different parameter sets but counting each of the calls as a separate test. .. _`generative tests`: features.html#generative-tests The other extension possibility is about specifying a custom test ``Item`` class which is responsible for setting up and executing an underlying test. Or you can extend the collection process for a whole directory tree by putting Items in a ``conftest.py`` configuration file. The collection process dynamically consults the *chain of conftest.py* modules to determine collectors and items at ``Directory``, ``Module``, ``Class``, ``Function`` or ``Generator`` level respectively. Customizing execution of Items and Functions ---------------------------------------------------- - ``pytest.Function`` test items control execution of a test function through its ``function.runtest()`` method. This method is responsible for performing setup and teardown ("Test Fixtures") for a test Function. - ``Function.execute(target, *args)`` methods are invoked by the default ``Function.run()`` to actually execute a python function with the given (usually empty set of) arguments. .. _`py-dev mailing list`: http://codespeak.net/mailman/listinfo/py-dev .. _`test generators`: funcargs.html#test-generators .. _`generative tests`: generative tests: yielding parametrized tests ==================================================== Deprecated since 1.0 in favour of `test generators`_. 
*Generative tests* are test methods that are *generator functions* which ``yield`` callables and their arguments. This is useful for running a test function multiple times against different parameters. Example:: def test_generative(): for x in (42,17,49): yield check, x def check(arg): assert arg % 7 == 0 # second generated tests fails! Note that ``test_generative()`` will cause three tests to get run, notably ``check(42)``, ``check(17)`` and ``check(49)`` of which the middle one will obviously fail. To make it easier to distinguish the generated tests it is possible to specify an explicit name for them, like for example:: def test_generative(): for x in (42,17,49): yield "case %d" % x, check, x disabling a test class ---------------------- If you want to disable a complete test class you can set the class-level attribute ``disabled``. For example, in order to avoid running some tests on Win32:: class TestPosixOnly: disabled = sys.platform == 'win32' def test_xxx(self): ... pytest-2.5.1/doc/ja/test/index.txt0000664000175000017500000000167512254002202016423 0ustar hpkhpk00000000000000======================================= py.test documentation index ======================================= features_: overview and discussion of features. quickstart_: getting started with writing a simple test. `talks, tutorials, examples`_: tutorial examples, slides funcargs_: powerful parametrized test function setup `plugins`_: list of available plugins with usage examples and feature details. customize_: configuration, customization, extensions changelog_: history of changes covering last releases **Continuous Integration of py.test's own tests and plugins with Hudson**: `http://hudson.testrun.org/view/pytest`_ .. _`http://hudson.testrun.org/view/pytest`: http://hudson.testrun.org/view/pytest/ .. _changelog: ../changelog.html .. _`plugins`: plugin/index.html .. _`talks, tutorials, examples`: talks.html .. _quickstart: quickstart.html .. _features: features.html .. 
_funcargs: funcargs.html .. _customize: customize.html pytest-2.5.1/doc/ja/test/plugin/0000775000175000017500000000000012254002202016040 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/test/plugin/terminal.txt0000664000175000017500000000206612254002202020420 0ustar hpkhpk00000000000000 Implements terminal reporting of the full testing process. ========================================================== .. contents:: :local: This is a good source for looking at the various reporting hooks. command line options -------------------- ``-v, --verbose`` increase verbosity. ``-r chars`` show extra test summary info as specified by chars (f)ailed, (s)kipped, (x)failed, (X)passed. ``-l, --showlocals`` show locals in tracebacks (disabled by default). ``--report=opts`` (deprecated, use -r) ``--tb=style`` traceback print mode (long/short/line/no). ``--fulltrace`` don't cut any tracebacks (default is to cut). ``--fixtures`` show available function arguments, sorted by plugin Start improving this plugin in 30 seconds ========================================= 1. Download `pytest_terminal.py`_ plugin source code 2. put it somewhere as ``pytest_terminal.py`` into your import path 3. a subsequent ``py.test`` run will use your local version Checkout customize_, other plugins_ or `get in contact`_. .. include:: links.txt pytest-2.5.1/doc/ja/test/plugin/helpconfig.txt0000664000175000017500000000165112254002202020722 0ustar hpkhpk00000000000000 provide version info, conftest/environment config names. ======================================================== .. contents:: :local: command line options -------------------- ``--version`` display py lib version and import information. ``-p name`` early-load given plugin (multi-allowed). ``--traceconfig`` trace considerations of conftest.py files. ``--nomagic`` don't reinterpret asserts, no traceback cutting. ``--debug`` generate and show internal debugging information. ``--help-config`` show available conftest.py and ENV-variable names. 
Start improving this plugin in 30 seconds ========================================= 1. Download `pytest_helpconfig.py`_ plugin source code 2. put it somewhere as ``pytest_helpconfig.py`` into your import path 3. a subsequent ``py.test`` run will use your local version Checkout customize_, other plugins_ or `get in contact`_. .. include:: links.txt pytest-2.5.1/doc/ja/test/plugin/genscript.txt0000664000175000017500000000122512254002202020577 0ustar hpkhpk00000000000000 generate standalone test script to be distributed along with an application. ============================================================================ .. contents:: :local: command line options -------------------- ``--genscript=path`` create standalone py.test script at given target path. Start improving this plugin in 30 seconds ========================================= 1. Download `pytest_genscript.py`_ plugin source code 2. put it somewhere as ``pytest_genscript.py`` into your import path 3. a subsequent ``py.test`` run will use your local version Checkout customize_, other plugins_ or `get in contact`_. .. include:: links.txt pytest-2.5.1/doc/ja/test/plugin/index.txt0000664000175000017500000000410612254002202017711 0ustar hpkhpk00000000000000 advanced python testing ======================= skipping_ advanced skipping for python test functions, classes or modules. mark_ generic mechanism for marking python functions. pdb_ interactive debugging with the Python Debugger. figleaf_ (external) report test coverage using the 'figleaf' package. monkeypatch_ safely patch object attributes, dicts and environment variables. coverage_ (external) Write and report coverage data with the 'coverage' package. cov_ (external) produce code coverage reports using the 'coverage' package, including support for distributed testing. capture_ configurable per-test stdout/stderr capturing mechanisms. capturelog_ (external) capture output of logging module. recwarn_ helpers for asserting deprecation and other warnings. 
tmpdir_ provide temporary directories to test functions. distributed testing, CI and deployment ====================================== xdist_ (external) loop on failing tests, distribute test runs to CPUs and hosts. pastebin_ submit failure or test session information to a pastebin service. junitxml_ logging of test results in JUnit-XML format, for use with Hudson resultlog_ non-xml machine-readable logging of test results. genscript_ generate standalone test script to be distributed along with an application. testing domains and conventions codecheckers ============================================ oejskit_ (external) run javascript tests in real life browsers django_ (external) for testing django applications unittest_ automatically discover and run traditional "unittest.py" style tests. nose_ nose-compatibility plugin: allow to run nose test suites natively. doctest_ collect and execute doctests from modules and test files. restdoc_ perform ReST syntax, local and remote reference tests on .rst/.txt files. internal, debugging, help functionality ======================================= helpconfig_ provide version info, conftest/environment config names. terminal_ Implements terminal reporting of the full testing process. hooklog_ log invocations of extension hooks to a file. .. include:: links.txt pytest-2.5.1/doc/ja/test/plugin/figleaf.txt0000664000175000017500000000165212254002202020202 0ustar hpkhpk00000000000000 report test coverage using the 'figleaf' package. ================================================= .. contents:: :local: Install --------------- To install the plugin issue:: easy_install pytest-figleaf # or pip install pytest-figleaf and if you are using pip you can also uninstall:: pip uninstall pytest-figleaf Usage --------------- After installation you can simply type:: py.test --figleaf [...] to enable figleaf coverage in your test run. A default ".figleaf" data file and "html" directory will be created. 
You can use command line options to control where data and html files are created. command line options -------------------- ``--figleaf`` trace python coverage with figleaf and write HTML for files below the current working dir ``--fig-data=dir`` set tracing file, default: ".figleaf". ``--fig-html=dir`` set html reporting dir, default "html". .. include:: links.txt pytest-2.5.1/doc/ja/test/plugin/nose.txt0000664000175000017500000000255112254002202017550 0ustar hpkhpk00000000000000 nose-compatibility plugin: allow to run nose test suites natively. ================================================================== .. contents:: :local: This is an experimental plugin that allows running tests written in 'nosetests' style with py.test. Usage ------------- type:: py.test # instead of 'nosetests' and you should be able to run nose style tests and at the same time can make full use of py.test's capabilities. Supported nose Idioms ---------------------- * setup and teardown at module/class/method level * SkipTest exceptions and markers * setup/teardown decorators * yield-based tests and their setup * general usage of nose utilities Unsupported idioms / issues ---------------------------------- - nose-style doctests are not collected and executed correctly, also fixtures don't work. - no nose-configuration is recognized If you find other issues or have suggestions please run:: py.test --pastebin=all and send the resulting URL to a py.test contact channel, at best to the mailing list. Start improving this plugin in 30 seconds ========================================= 1. Download `pytest_nose.py`_ plugin source code 2. put it somewhere as ``pytest_nose.py`` into your import path 3. a subsequent ``py.test`` run will use your local version Checkout customize_, other plugins_ or `get in contact`_. ..
include:: links.txt pytest-2.5.1/doc/ja/test/plugin/xdist.txt0000664000175000017500000001312412254002202017735 0ustar hpkhpk00000000000000 loop on failing tests, distribute test runs to CPUs and hosts. ============================================================== .. contents:: :local: The `pytest-xdist`_ plugin extends py.test with some unique test execution modes: * Looponfail: run your tests repeatedly in a subprocess. After each run py.test waits until a file in your project changes and then re-runs the previously failing tests. This is repeated until all tests pass after which again a full run is performed. * Load-balancing: if you have multiple CPUs or hosts you can use those for a combined test run. This allows to speed up development or to use special resources of remote machines. * Multi-Platform coverage: you can specify different Python interpreters or different platforms and run tests in parallel on all of them. Before running tests remotely, ``py.test`` efficiently synchronizes your program source code to the remote place. All test results are reported back and displayed to your local test session. You may specify different Python versions and interpreters. .. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist Usage examples --------------------- Speed up test runs by sending tests to multiple CPUs +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ To send tests to multiple CPUs, type:: py.test -n NUM Especially for longer running tests or tests requiring a lot of IO this can lead to considerable speed ups. Running tests in a Python subprocess +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ To instantiate a python2.4 sub process and send tests to it, you may type:: py.test -d --tx popen//python=python2.4 This will start a subprocess which is run with the "python2.4" Python interpreter, found in your system binary lookup path. 
If you prefix the --tx option value like this:: --tx 3*popen//python=python2.4 then three subprocesses would be created and tests will be load-balanced across these three processes. Sending tests to remote SSH accounts +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Suppose you have a package ``mypkg`` which contains some tests that you can successfully run locally. And you have a ssh-reachable machine ``myhost``. Then you can ad-hoc distribute your tests by typing:: py.test -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg This will synchronize your ``mypkg`` package directory to an remote ssh account and then locally collect tests and send them to remote places for execution. You can specify multiple ``--rsyncdir`` directories to be sent to the remote side. **NOTE:** For py.test to collect and send tests correctly you not only need to make sure all code and tests directories are rsynced, but that any test (sub) directory also has an ``__init__.py`` file because internally py.test references tests as a fully qualified python module path. **You will otherwise get strange errors** during setup of the remote side. Sending tests to remote Socket Servers +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Download the single-module `socketserver.py`_ Python program and run it like this:: python socketserver.py It will tell you that it starts listening on the default port. You can now on your home machine specify this new socket host with something like this:: py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg .. _`atonce`: Running tests on many platforms at once +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ The basic command to run tests on multiple platforms is:: py.test --dist=each --tx=spec1 --tx=spec2 If you specify a windows host, an OSX host and a Linux environment this command will send each tests to all platforms - and report back failures from all platforms at once. 
The specifications strings use the `xspec syntax`_. .. _`xspec syntax`: http://codespeak.net/execnet/trunk/basics.html#xspec .. _`socketserver.py`: http://codespeak.net/svn/py/dist/py/execnet/script/socketserver.py .. _`execnet`: http://codespeak.net/execnet Specifying test exec environments in a conftest.py +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Instead of specifying command line options, you can put options values in a ``conftest.py`` file like this:: option_tx = ['ssh=myhost//python=python2.5', 'popen//python=python2.5'] option_dist = True Any commandline ``--tx`` specifications will add to the list of available execution environments. Specifying "rsync" dirs in a conftest.py +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ In your ``mypkg/conftest.py`` you may specify directories to synchronise or to exclude:: rsyncdirs = ['.', '../plugins'] rsyncignore = ['_cache'] These directory specifications are relative to the directory where the ``conftest.py`` is found. command line options -------------------- ``-f, --looponfail`` run tests in subprocess, wait for modified files and re-run failing test set until all pass. ``-n numprocesses`` shortcut for '--dist=load --tx=NUM*popen' ``--boxed`` box each test run in a separate process (unix) ``--dist=distmode`` set mode for distributing tests to exec environments. each: send each test to each available environment. load: send each test to available environment. (default) no: run tests inprocess, don't distribute. ``--tx=xspec`` add a test execution environment. some examples: --tx popen//python=python2.5 --tx socket=192.168.1.102:8888 --tx ssh=user@codespeak.net//chdir=testcache ``-d`` load-balance tests. shortcut for '--dist=load' ``--rsyncdir=dir1`` add directory for rsyncing to remote tx nodes. .. include:: links.txt pytest-2.5.1/doc/ja/test/plugin/links.txt0000664000175000017500000000520412254002202017722 0ustar hpkhpk00000000000000.. _`helpconfig`: helpconfig.html .. 
_`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_recwarn.py .. _`unittest`: unittest.html .. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_monkeypatch.py .. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_genscript.py .. _`pastebin`: pastebin.html .. _`skipping`: skipping.html .. _`genscript`: genscript.html .. _`plugins`: index.html .. _`mark`: mark.html .. _`tmpdir`: tmpdir.html .. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_doctest.py .. _`capture`: capture.html .. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_nose.py .. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_restdoc.py .. _`restdoc`: restdoc.html .. _`xdist`: xdist.html .. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_pastebin.py .. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_tmpdir.py .. _`terminal`: terminal.html .. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_hooklog.py .. _`capturelog`: capturelog.html .. _`junitxml`: junitxml.html .. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_skipping.py .. _`checkout the py.test development version`: ../../install.html#checkout .. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_helpconfig.py .. _`oejskit`: oejskit.html .. _`doctest`: doctest.html .. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_mark.py .. _`get in contact`: ../../contact.html .. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_capture.py .. _`figleaf`: figleaf.html .. _`customize`: ../customize.html .. _`hooklog`: hooklog.html .. 
_`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_terminal.py .. _`recwarn`: recwarn.html .. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_pdb.py .. _`monkeypatch`: monkeypatch.html .. _`coverage`: coverage.html .. _`resultlog`: resultlog.html .. _`cov`: cov.html .. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_junitxml.py .. _`django`: django.html .. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_unittest.py .. _`nose`: nose.html .. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_resultlog.py .. _`pdb`: pdb.html pytest-2.5.1/doc/ja/test/plugin/django.txt0000664000175000017500000000050312254002202020041 0ustar hpkhpk00000000000000pytest_django plugin (EXTERNAL) ========================================== pytest_django is a plugin for py.test that provides a set of useful tools for testing Django applications, checkout Ben Firshman's `pytest_django github page`_. .. _`pytest_django github page`: http://github.com/bfirsh/pytest_django/tree/master pytest-2.5.1/doc/ja/test/plugin/oejskit.txt0000664000175000017500000000153412254002202020254 0ustar hpkhpk00000000000000pytest_oejskit plugin (EXTERNAL) ========================================== The `oejskit`_ offers a py.test plugin for running Javascript tests in life browsers. Running inside the browsers comes with some speed cost, on the other hand it means for example the code is tested against the real-word DOM implementations. The approach enables to write integration tests such that the JavaScript code is tested against server-side Python code mocked as necessary. Any server-side framework that can already be exposed through WSGI (or for which a subset of WSGI can be written to accommodate the jskit own needs) can play along. For more info and download please visit the `oejskit PyPI`_ page. .. _`oejskit`: .. 
_`oejskit PyPI`: http://pypi.python.org/pypi/oejskit .. source link 'http://bitbucket.org/pedronis/js-infrastructure/src/tip/pytest_jstests.py', pytest-2.5.1/doc/ja/test/plugin/cov.txt0000664000175000017500000001726512254002202017403 0ustar hpkhpk00000000000000 produce code coverage reports using the 'coverage' package, including support for distributed testing. ====================================================================================================== .. contents:: :local: This plugin produces coverage reports. It supports centralised testing and distributed testing in both load and each modes. It also supports coverage of subprocesses. All features offered by the coverage package should be available, either through pytest-cov or through coverage's config file. Installation ------------ The `pytest-cov`_ package may be installed with pip or easy_install:: pip install pytest-cov easy_install pytest-cov .. _`pytest-cov`: http://pypi.python.org/pypi/pytest-cov/ Uninstallation -------------- Uninstalling packages is supported by pip:: pip uninstall pytest-cov However easy_install does not provide an uninstall facility. .. IMPORTANT:: Ensure that you manually delete the init_covmain.pth file in your site-packages directory. This file starts coverage collection of subprocesses if appropriate during site initialization at python startup. Usage ----- Centralised Testing ~~~~~~~~~~~~~~~~~~~ Centralised testing will report on the combined coverage of the main process and all of it's subprocesses. 
Running centralised testing:: py.test --cov myproj tests/ Shows a terminal report:: -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- Name Stmts Miss Cover ---------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% myproj/feature4286 94 7 92% ---------------------------------------- TOTAL 353 20 94% Distributed Testing: Load ~~~~~~~~~~~~~~~~~~~~~~~~~ Distributed testing with dist mode set to load will report on the combined coverage of all slaves. The slaves may be spread out over any number of hosts and each slave may be located anywhere on the file system. Each slave will have it's subprocesses measured. Running distributed testing with dist mode set to load:: py.test --cov myproj -n 2 tests/ Shows a terminal report:: -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- Name Stmts Miss Cover ---------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% myproj/feature4286 94 7 92% ---------------------------------------- TOTAL 353 20 94% Again but spread over different hosts and different directories:: py.test --cov myproj --dist load --tx ssh=memedough@host1//chdir=testenv1 --tx ssh=memedough@host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python --rsyncdir myproj --rsyncdir tests --rsync examples tests/ Shows a terminal report:: -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- Name Stmts Miss Cover ---------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% myproj/feature4286 94 7 92% ---------------------------------------- TOTAL 353 20 94% Distributed Testing: Each ~~~~~~~~~~~~~~~~~~~~~~~~~ Distributed testing with dist mode set to each will report on the combined coverage of all slaves. Since each slave is running all tests this allows generating a combined coverage report for multiple environments. 
Running distributed testing with dist mode set to each:: py.test --cov myproj --dist each --tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python --tx ssh=memedough@host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python --rsyncdir myproj --rsyncdir tests --rsync examples tests/ Shows a terminal report:: ---------------------------------------- coverage ---------------------------------------- platform linux2, python 2.6.5-final-0 platform linux2, python 2.7.0-final-0 Name Stmts Miss Cover ---------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% myproj/feature4286 94 7 92% ---------------------------------------- TOTAL 353 20 94% Reporting --------- It is possible to generate any combination of the reports for a single test run. The available reports are terminal (with or without missing line numbers shown), HTML, XML and annotated source code. The terminal report without line numbers (default):: py.test --cov-report term --cov myproj tests/ -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- Name Stmts Miss Cover ---------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% myproj/feature4286 94 7 92% ---------------------------------------- TOTAL 353 20 94% The terminal report with line numbers:: py.test --cov-report term-missing --cov myproj tests/ -------------------- coverage: platform linux2, python 2.6.4-final-0 --------------------- Name Stmts Miss Cover Missing -------------------------------------------------- myproj/__init__ 2 0 100% myproj/myproj 257 13 94% 24-26, 99, 149, 233-236, 297-298, 369-370 myproj/feature4286 94 7 92% 183-188, 197 -------------------------------------------------- TOTAL 353 20 94% The remaining three reports output to files without showing anything on the terminal (useful for when the output is going to a continuous integration server):: py.test --cov-report html --cov-report xml --cov-report annotate --cov myproj 
tests/ Coverage Data File ------------------ The data file is erased at the beginning of testing to ensure clean data for each test run. The data file is left at the end of testing so that it is possible to use normal coverage tools to examine it. Limitations ----------- For distributed testing the slaves must have the pytest-cov package installed. This is needed since the plugin must be registered through setuptools / distribute for pytest to start the plugin on the slave. For subprocess measurement environment variables must make it from the main process to the subprocess. The python used by the subprocess must have pytest-cov installed. The subprocess must do normal site initialization so that the environment variables can be detected and coverage started. Acknowledgments ---------------- Holger Krekel for pytest with its distributed testing support. Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs. Whilst this plugin has been built fresh from the ground up to support distributed testing it has been influenced by the work done on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and nose-cover (Jason Pellerin) which are other coverage plugins for pytest and nose respectively. No doubt others have contributed to these tools as well. command line options -------------------- ``--cov=path`` measure coverage for filesystem path (multi-allowed) ``--cov-report=type`` type of report to generate: term, term-missing, annotate, html, xml (multi-allowed) ``--cov-config=path`` config file for coverage, default: .coveragerc .. include:: links.txt pytest-2.5.1/doc/ja/test/plugin/coverage.txt0000664000175000017500000000245412254002202020401 0ustar hpkhpk00000000000000 Write and report coverage data with the 'coverage' package. =========================================================== .. contents:: :local: Note: Original code by Ross Lawley. 
Install -------------- Use pip to (un)install:: pip install pytest-coverage pip uninstall pytest-coverage or alternatively use easy_install to install:: easy_install pytest-coverage Usage ------------- To get full test coverage reports for a particular package type:: py.test --cover-report=report command line options -------------------- ``--cover=COVERPACKAGES`` (multi allowed) only include info from specified package. ``--cover-report=REPORT_TYPE`` html: Directory for html output. report: Output a text report. annotate: Annotate your source code for which lines were executed and which were not. xml: Output an xml report compatible with the cobertura plugin for hudson. ``--cover-directory=DIRECTORY`` Directory for the reports (html / annotate results) defaults to ./coverage ``--cover-xml-file=XML_FILE`` File for the xml report defaults to ./coverage.xml ``--cover-show-missing`` Show missing files ``--cover-ignore-errors=IGNORE_ERRORS`` Ignore errors of finding source files for code. .. include:: links.txt pytest-2.5.1/doc/ja/test/mission.txt0000664000175000017500000000076112254002202016770 0ustar hpkhpk00000000000000 Mission ==================================== py.test strives to make testing a fun and no-boilerplate effort. The tool is distributed as part of the `py` package which contains supporting APIs that are also usable independently. The project independent ``py.test`` command line tool helps you to: * rapidly collect and run tests * run unit- or doctests, functional or integration tests * distribute tests to multiple environments * use local or global plugins for custom test types and setup pytest-2.5.1/doc/ja/overview.txt0000664000175000017500000000051112254002202016167 0ustar hpkhpk00000000000000.. ================================================== Getting started basics ================================================== ================ 基本ã‹ã‚‰å§‹ã‚よㆠ================ .. 
toctree:: :maxdepth: 2 index.txt getting-started.txt usage.txt goodpractises.txt projects.txt faq.txt pytest-2.5.1/doc/ja/nose.txt0000664000175000017500000000341712254002202015275 0ustar hpkhpk00000000000000.. Running tests written for nose ======================================= nose å‘ã‘ã«æ›¸ã‹ã‚ŒãŸãƒ†ã‚¹ãƒˆã®å®Ÿè¡Œ =============================== .. include:: links.inc .. py.test has basic support for running tests written for nose_. py.test ã¯ã€åŸºæœ¬çš„ã«ã¯ nose_ å‘ã‘ã«æ›¸ã‹ã‚ŒãŸãƒ†ã‚¹ãƒˆã®å®Ÿè¡Œã«å¯¾å¿œã—ã¦ã„ã¾ã™ã€‚ .. Usage ------------- 使用方法 -------- .. type:: å˜ç´”ã«:: py.test # 'nosetests' ã®ä»£ã‚りã«å®Ÿè¡Œ .. and you should be able to run your nose style tests and make use of py.test's capabilities. を実行ã—ã¾ã™ã€‚pytest ã®æ©Ÿèƒ½ã‚’使ã£ã¦ nose スタイルã®ãƒ†ã‚¹ãƒˆã‚’実行ã§ãã¾ã™ã€‚ .. Supported nose Idioms ---------------------- 対応ã—ã¦ã„ã‚‹ nose イディオム ---------------------------- .. * setup and teardown at module/class/method level * SkipTest exceptions and markers * setup/teardown decorators * yield-based tests and their setup * general usage of nose utilities * モジュールï¼ã‚¯ãƒ©ã‚¹ï¼ãƒ¡ã‚½ãƒƒãƒ‰ãƒ¬ãƒ™ãƒ«ã® setup 㨠teardown * SkipTest 例外ã¨ãƒžãƒ¼ã‚«ãƒ¼ * setup/teardown デコレーター * yield ベースã®ãƒ†ã‚¹ãƒˆã¨ãã®ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ— * nose ユーティリティã®ä¸€èˆ¬çš„ãªä½¿ç”¨æ–¹æ³• .. Unsupported idioms / known issues ---------------------------------- 対応ã—ã¦ã„ãªã„ã‚¤ãƒ‡ã‚£ã‚ªãƒ ã¨æ—¢çŸ¥ã®èª²é¡Œ ------------------------------------ .. - nose-style doctests are not collected and executed correctly, also doctest fixtures don't work. - nose スタイル㮠doctest ã¯æŽ¢ç´¢ã•れãšã€æ­£å¸¸ã«å®Ÿè¡Œã•れã¾ã›ã‚“ã€ã¾ãŸ doctest フィクスãƒãƒ£ã‚‚動作ã—ã¾ã›ã‚“ .. - no nose-configuration is recognized - nose 設定ã¯èªè­˜ã•れã¾ã›ã‚“ pytest-2.5.1/doc/ja/getting-started.txt0000664000175000017500000003272012254002202017435 0ustar hpkhpk00000000000000.. Installation and Getting Started =================================== インストールã—ã¦å§‹ã‚よㆠ======================== **Pythons**: Python 2.4-3.2, Jython, PyPy .. 
**Platforms**: Unix/Posix and Windows **Platforms** : Unix/Posix 㨠Windows .. **PyPI package name**: `pytest `_ **PyPI パッケージå** : `pytest `_ .. **documentation as PDF**: `download latest `_ **PDF ドキュメント** : `最新をダウンロード `_ .. _`getstarted`: インストール ------------ .. Installation ---------------------------------------- .. Installation options:: インストールオプション:: pip install -U pytest # or easy_install -U pytest .. To check your installation has installed the correct version:: インストール後ã«é©åˆ‡ãªãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‹ã‚’確èªã™ã‚‹ã«ã¯ã€æ¬¡ã®ã‚ˆã†ã«å®Ÿè¡Œã—ã¾ã™:: $ py.test --version This is py.test version 2.2.4, imported from /home/hpk/p/pytest/pytest.py setuptools registered plugins: pytest-xdist-1.8 at /home/hpk/p/pytest-xdist/xdist/plugin.pyc .. If you get an error checkout :ref:`installation issues`. エラーãŒç™ºç”Ÿã—ãŸã‚‰ :ref:`installation issues` を確èªã—ã¦ãã ã•ã„。 .. _`simpletest`: åˆã‚ã¦ã®ãƒ†ã‚¹ãƒˆå®Ÿè¡Œ ------------------ .. Our first test run ---------------------------------------------------------- .. Let's create a first test file with a simple test function:: ç°¡å˜ãªãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’å«ã‚€æœ€åˆã®ãƒ†ã‚¹ãƒˆãƒ•ァイルを作りã¾ã—ょã†:: # test_sample.py ã®å†…容 def func(x): return x + 1 def test_answer(): assert func(3) == 5 .. That's it. You can execute the test function now:: ã“ã‚“ãªæ„Ÿã˜ã§ã™ã€‚ã•ã‚ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’実行ã—ã¾ã—ょã†:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 1 items test_sample.py F ================================= FAILURES ================================= _______________________________ test_answer ________________________________ def test_answer(): > assert func(3) == 5 E assert 4 == 5 E + where 4 = func(3) test_sample.py:5: AssertionError ========================= 1 failed in 0.01 seconds ========================= .. py.test found the ``test_answer`` function by following :ref:`standard test discovery rules `, basically detecting the ``test_`` prefixes. 
We got a failure report because our little ``func(3)`` call did not return ``5``. py.test 㯠:ref:`標準的ãªãƒ†ã‚¹ãƒˆæŽ¢ç´¢ãƒ«ãƒ¼ãƒ« ` ã«å¾“ã„ ``test_answer`` 関数を検出ã—ã¾ã™ã€‚基本的ã«ã¯ ``test_`` ã®æŽ¥é ­è¾žã‚’ã‚‚ã¤ãƒ•ァイルや関数ã§ã™ã€‚å…ˆã»ã©ä½œæˆã—㟠``func(3)`` 呼ã³å‡ºã—㌠``5`` ã‚’è¿”ã•ãªã‹ã£ãŸã¨ã„ã†å¤±æ•—レãƒãƒ¼ãƒˆã‚’å—ã‘å–りã¾ã—ãŸã€‚ .. You can simply use the ``assert`` statement for asserting test expectations. pytest's :ref:`assert introspection` will intelligently report intermediate values of the assert expression freeing you from the need to learn the many names of `JUnit legacy methods`_. .. note:: ãƒ†ã‚¹ãƒˆã®æœŸå¾…値をアサートã™ã‚‹ã«ã¯å˜ç´”ã« ``assert`` 文を使ã„ã¾ã™ã€‚pytest ã® :ref:`assert introspection` 㯠assert 評価時ã®ä¸­é–“値を賢ãレãƒãƒ¼ãƒˆã—ã¾ã™ã€‚ã“れã«ã‚ˆã‚Šã€å¤šãã® `JUnit レガシーメソッド`_ ã®åå‰ã‚’覚ãˆã‚‹å¿…è¦ãŒãªããªã‚Šã¾ã™ã€‚ .. _`JUnit legacy methods`: http://docs.python.org/library/unittest.html#test-cases .. _`JUnit レガシーメソッド`: http://docs.python.org/library/unittest.html#test-cases .. _`assert statement`: http://docs.python.org/reference/simple_stmts.html#the-assert-statement 特定ã®ä¾‹å¤–ãŒç™ºç”Ÿã—ãŸã“ã¨ã‚’アサートã™ã‚‹ -------------------------------------- .. Asserting that a certain exception is raised -------------------------------------------------------------- .. If you want to assert that some code raises an exception you can use the ``raises`` helper:: 例外を発生ã•ã›ã‚‹ã‚³ãƒ¼ãƒ‰ã‚’テストã—ãŸã„ãªã‚‰ ``raises`` ヘルパー関数を使ã„ã¾ã™:: # test_sysexit.py ã®å†…容 import pytest def f(): raise SystemExit(1) def test_mytest(): with pytest.raises(SystemExit): f() .. Running it with, this time in "quiet" reporting mode:: ã“ã®ã‚³ãƒ¼ãƒ‰ã‚’ "quiet" モードã§å®Ÿè¡Œã—ã¾ã™:: $ py.test -q test_sysexit.py collecting ... collected 1 items . 1 passed in 0.00 seconds .. todo:: For further ways to assert exceptions see the `raises` .. Grouping multiple tests in a class -------------------------------------------------------------- 1ã¤ã®ã‚¯ãƒ©ã‚¹ã§è¤‡æ•°ã®ãƒ†ã‚¹ãƒˆã‚’グループ化ã™ã‚‹ ----------------------------------------- .. 
Once you start to have more than a few tests it often makes sense to group tests logically, in classes and modules. Let's write a class containing two tests:: テストを書ãå§‹ã‚ã¦ä½•個ã‹ä½œæˆã—ãŸã‚‰ã€ã‚¯ãƒ©ã‚¹ã‚„モジュール内ã«ãã†ã„ã£ãŸãƒ†ã‚¹ãƒˆã‚’グループ化ã™ã‚‹ã¨åˆ†ã‹ã‚Šã‚„ã™ããªã‚Šã¾ã™ã€‚2ã¤ã®ãƒ†ã‚¹ãƒˆã‚’å«ã‚€ã‚¯ãƒ©ã‚¹ã‚’作æˆã—ã¾ã—ょã†:: # test_class.py ã®å†…容 class TestClass: def test_one(self): x = "this" assert 'h' in x def test_two(self): x = "hello" assert hasattr(x, 'check') .. The two tests are found because of the standard :ref:`test discovery`. There is no need to subclass anything. We can simply run the module by passing its filename:: :ref:`標準的ãªãƒ†ã‚¹ãƒˆæŽ¢ç´¢ãƒ«ãƒ¼ãƒ« ` ã«ã‚ˆã‚Šã€2ã¤ã®ãƒ†ã‚¹ãƒˆãŒæ¤œå‡ºã•れã¾ã—ãŸã€‚サブクラス化ã™ã‚‹å¿…è¦ã¯ã‚りã¾ã›ã‚“。å˜ç´”ã«ãã®ãƒ•ァイルåを与ãˆã‚‹ã“ã¨ã§ã€å¯¾è±¡ã®ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’実行ã§ãã¾ã™:: $ py.test -q test_class.py collecting ... collected 2 items .F ================================= FAILURES ================================= ____________________________ TestClass.test_two ____________________________ self = def test_two(self): x = "hello" > assert hasattr(x, 'check') E assert hasattr('hello', 'check') test_class.py:8: AssertionError 1 failed, 1 passed in 0.01 seconds .. The first test passed, the second failed. Again we can easily see the intermediate values used in the assertion, helping us to understand the reason for the failure. 最åˆã®ãƒ†ã‚¹ãƒˆã¯æˆåŠŸã—ã€2番目ã®ãƒ†ã‚¹ãƒˆã¯å¤±æ•—ã—ã¾ã—ãŸã€‚ã¾ãŸã€å¤±æ•—ã—ãŸåŽŸå› ã‚’ç†è§£ã—ã‚„ã™ã„よã†ã€ã“ã®ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ã®ä¸­é–“値ãŒã±ã£ã¨è¦‹ã¦åˆ†ã‹ã‚Šã¾ã™ã€‚ .. Going functional: requesting a unique temporary directory -------------------------------------------------------------- 機能テスト: 一時ディレクトリã®è¦æ±‚ ---------------------------------- .. For functional tests one often needs to create some files and pass them to application objects. 
py.test provides the versatile :ref:`funcarg mechanism` which allows to request arbitrary resources, for example a unique temporary directory:: 機能テストã§ã¯ã€ãƒ•ァイルを作æˆã—ã¦ã€ã‚¢ãƒ—リケーションã®ã‚ªãƒ–ジェクトをãã®ãƒ•ã‚¡ã‚¤ãƒ«ã«æ›¸ã込むよã†ãªã“ã¨ãŒã‚ˆãã‚りã¾ã™ã€‚py.test ã¯ã€1ã¤ã ã‘存在ã™ã‚‹ä¸€æ™‚ディレクトリã¨ã„ã£ãŸã€ä»»æ„ã®ãƒªã‚½ãƒ¼ã‚¹è¦æ±‚を扱ã†ä¸‡èƒ½ã® :ref:`funcarg mechanism` ã‚’æä¾›ã—ã¾ã™:: # test_tmpdir.py ã®å†…容 def test_needsfiles(tmpdir): print tmpdir assert 0 .. We list the name ``tmpdir`` in the test function signature and py.test will lookup and call a factory to create the resource before performing the test function call. Let's just run it:: テスト関数ã®ã‚·ã‚°ãƒãƒãƒ£ã« ``tmpdir`` ã¨ã„ã†åå‰ã‚’å«ã‚ã¾ã™ã€‚py.test ã¯ãã®åå‰ã‚’見ã¤ã‘ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ãŒå‘¼ã³å‡ºã•れるå‰ã«ãƒªã‚½ãƒ¼ã‚¹ã‚’作æˆã™ã‚‹ãƒ•ァクトリー関数を呼ã³å‡ºã—ã¾ã™ã€‚ã§ã¯ã€å®Ÿè¡Œã—ã¦ã¿ã¾ã—ょã†:: $ py.test -q test_tmpdir.py collecting ... collected 1 items F ================================= FAILURES ================================= _____________________________ test_needsfiles ______________________________ tmpdir = local('/tmp/pytest-22/test_needsfiles0') def test_needsfiles(tmpdir): print tmpdir > assert 0 E assert 0 test_tmpdir.py:3: AssertionError ----------------------------- Captured stdout ------------------------------ /tmp/pytest-22/test_needsfiles0 1 failed in 0.01 seconds .. Before the test runs, a unique-per-test-invocation temporary directory was created. More info at :ref:`tmpdir handling`. テストを実行ã™ã‚‹æ¯Žã«ã€ãã®ãƒ†ã‚¹ãƒˆé–¢æ•°ã®å®Ÿè¡Œå‰ã«ä¸€æ™‚ディレクトリãŒä½œæˆã•れã¾ã—ãŸã€‚ã•らã«è©³ç´°ã¯ :ref:`tmpdir handling` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. You can find out what kind of builtin :ref:`funcargs` exist by typing:: 組ã¿è¾¼ã¿ã® :ref:`funcargs` を把æ¡ã™ã‚‹ã«ã¯ã€æ¬¡ã®ã‚³ãƒžãƒ³ãƒ‰ã‚’実行ã—ã¾ã™:: py.test --fixtures # 組ã¿è¾¼ã¿/カスタムã®é–¢æ•°ã®å¼•数を表示ã™ã‚‹ .. Where to go next ------------------------------------- 次ã«å­¦ã¶ã“㨠------------ .. Here are a few suggestions where to go next: 次ã®ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã‚’見ã¦ã¾ã—ょã†: .. 
* :ref:`cmdline` for command line invocation examples * :ref:`good practises ` for virtualenv, test layout, genscript support * :ref:`apiref` for documentation and examples on using py.test * :ref:`plugins` managing and writing plugins * :ref:`cmdline`: コマンドラインã®å®Ÿè¡Œæ–¹æ³•ã®ã‚µãƒ³ãƒ—ル * :ref:`優れãŸãƒ—ラクティス `: virtualenvã€ãƒ†ã‚¹ãƒˆãƒ¬ã‚¤ã‚¢ã‚¦ãƒˆã€genscript ã®å¯¾å¿œ * :ref:`apiref`: ドキュメント㨠py.test を使ã†ä¸Šã§ã®ã‚µãƒ³ãƒ—ル * :ref:`plugins`: プラグインã®ç®¡ç†ã¨ä½œæˆ .. _`installation issues`: インストールã«é–¢ã™ã‚‹æ—¢çŸ¥ã®å•題 ------------------------------ .. Known Installation issues ------------------------------ .. easy_install or pip not found? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ easy_install ã‚„ pip ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ ++++++++++++++++++++++++++++++++++++ .. _`install pip`: http://www.pip-installer.org/en/latest/index.html .. _`pip をインストール`: http://www.pip-installer.org/en/latest/index.html .. `Install pip`_ for a state of the art python package installer. 最先端㮠Python パッケージインストーラーã§ã‚ã‚‹ `pip をインストール`_ ã—ã¦ãã ã•ã„。 .. Or consult `distribute docs`_ to install the ``easy_install`` tool on your machine. ã‚‚ã—ã㯠``easy_install`` ツールをインストールã™ã‚‹ãŸã‚ã« `distribute docs`_ を読んã§ãã ã•ã„。 .. You may also use the older `setuptools`_ project but it lacks bug fixes and does not work on Python3. æ—§æ¥ã® `setuptools`_ プロジェクトも使ãˆã¾ã™ãŒã€ãれã¯ãƒã‚°ä¿®æ­£ãŒè¡Œã‚れã¦ãªã Python 3 ã§ã‚‚動作ã—ã¾ã›ã‚“。 .. py.test not found on Windows despite installation? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ インストールã—ãŸã®ã« Windows 上㧠py.test ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. _`Python for Windows`: http://www.imladris.com/Scripts/PythonForWindows.html .. - **Windows**: If "easy_install" or "py.test" are not found you need to add the Python script path to your ``PATH``, see here: `Python for Windows`_. You may alternatively use an `ActivePython install`_ which does this for you automatically. 
- **Windows**: "easy_install" ã¾ãŸã¯ "py.test" ãŒè¦‹ã¤ã‹ã‚‰ãªã„ãªã‚‰ã€ ``PATH`` ã«ãれら㮠Python スクリプトを追加ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚ `Python for Windows`_ ã‚’å‚ç…§ã—ã¦ãã ã•ã„ã€‚åˆ¥ã®æ–¹æ³•ã¨ã—ã¦ã€è‡ªå‹•çš„ã«ãƒ‘ス設定を行ã£ã¦ãれる `ActivePython install`_ を使ã†ã“ã¨ã‚‚ã§ãã¾ã™ã€‚ .. _`ActivePython install`: http://www.activestate.com/activepython/downloads .. _`Jython does not create command line launchers`: http://bugs.jython.org/issue1491 .. _`Jython ã¯ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ãƒ©ãƒ³ãƒãƒ£ãƒ¼ã‚’作らãªã„`: http://bugs.jython.org/issue1491 .. - **Jython2.5.1 on Windows XP**: `Jython does not create command line launchers`_ so ``py.test`` will not work correctly. You may install py.test on CPython and type ``py.test --genscript=mytest`` and then use :ref:`examples` for more complex examples ``jython mytest`` to run py.test for your tests to run with Jython. - **Windows XP 上㮠Jython2.5.1**: `Jython ã¯ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ãƒ©ãƒ³ãƒãƒ£ãƒ¼ã‚’作らãªã„`_ ã®ã§ ``py.test`` ã¯æ­£å¸¸ã«å‹•作ã—ã¾ã›ã‚“。CPython 上㫠py.test をインストールã—㦠``py.test --genscript=mytest`` を実行ã™ã‚‹ã¨ã€Jython ã§è¡Œã†ãƒ†ã‚¹ãƒˆã‚’ py.test ã§å®Ÿè¡Œã™ã‚‹ ``jython mytest`` ãŒä½¿ãˆã¾ã™ã€‚ 複雑ãªä¾‹ã¯ :ref:`examples` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. include:: links.inc pytest-2.5.1/doc/ja/xdist.txt0000664000175000017500000003140212254002202015457 0ustar hpkhpk00000000000000 .. _`xdist`: xdist: pytest ã®åˆ†æ•£ãƒ†ã‚¹ãƒˆãƒ—ラグイン ==================================== .. xdist: pytest distributed testing plugin =============================================================== .. The `pytest-xdist`_ plugin extends py.test with some unique test execution modes: `pytest-xdist`_ プラグインã¯ã€ç‹¬è‡ªã®ãƒ†ã‚¹ãƒˆãƒ¢ãƒ¼ãƒ‰ã§ py.test ã‚’æ‹¡å¼µã—ã¾ã™: .. * Looponfail: run your tests repeatedly in a subprocess. After each run, py.test waits until a file in your project changes and then re-runs the previously failing tests. This is repeated until all tests pass. At this point a full run is again performed. 
* Looponfail: サブプロセスã§ãƒ†ã‚¹ãƒˆã‚’繰り返ã—実行ã—ã¾ã™ã€‚å„テストを実行ã—ã¦ã‹ã‚‰ py.test ã¯ãƒ—ロジェクト内ã®ãƒ•ァイルãŒå¤‰æ›´ã•れるã¾ã§å¾…ã¡ã€äº‹å‰ã«å¤±æ•—ã—ãŸãƒ†ã‚¹ãƒˆã®ã¿ã‚’å†å®Ÿè¡Œã—ã¾ã™ã€‚ã“ã®å‹•作ã¯å…¨ã¦ã®ãƒ†ã‚¹ãƒˆãŒæˆåŠŸã™ã‚‹ã¾ã§ç¹°ã‚Šè¿”ã•れã¾ã™ã€‚ã“ã®æ®µéšŽã«ãªã£ã¦å†åº¦ã€å…¨ã¦ã®ãƒ†ã‚¹ãƒˆã‚’実行ã—ã¾ã™ã€‚ .. * multiprocess Load-balancing: if you have multiple CPUs or hosts you can use them for a combined test run. This allows to speed up development or to use special resources of remote machines. * マルãƒãƒ—ロセス負è·åˆ†æ•£: 複数 CPU ã¾ãŸã¯è¤‡æ•°ãƒ›ã‚¹ãƒˆãŒã‚ã‚‹å ´åˆã€ãれらをçµåˆãƒ†ã‚¹ãƒˆã®å®Ÿè¡Œã«ä½¿ã„ã¾ã™ã€‚ã“れã«ã‚ˆã‚Šé–‹ç™ºã‚’高速化ã—ãŸã‚Šã€ãƒªãƒ¢ãƒ¼ãƒˆãƒžã‚·ãƒ³ã®ç‰¹åˆ¥ãªãƒªã‚½ãƒ¼ã‚¹ã‚’使ã£ãŸã‚Šã—ã¾ã™ã€‚ .. * Multi-Platform coverage: you can specify different Python interpreters or different platforms and run tests in parallel on all of them. * マルãƒãƒ—ラットフォームã®ã‚«ãƒãƒ¬ãƒƒã‚¸: ç•°ãªã‚‹ Python インタープリターã€ã¾ãŸã¯ç•°ãªã‚‹ãƒ—ラットフォームを指定ã—ã¦ã€å…¨ã¦ã®ç’°å¢ƒã§ä¸¦è¡Œã—ã¦ãƒ†ã‚¹ãƒˆã‚’実行ã—ã¾ã™ã€‚ .. Before running tests remotely, ``py.test`` efficiently "rsyncs" your program source code to the remote place. All test results are reported back and displayed to your local terminal. You may specify different Python versions and interpreters. リモート環境ã§ãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹å‰ã«ã€ ``py.test`` ã¯ãƒ—ログラムã®ã‚½ãƒ¼ã‚¹ã‚³ãƒ¼ãƒ‰ã‚’リモートマシンã¸åŠ¹çŽ‡çš„ã« "rsync" ã—ã¾ã™ã€‚å…¨ã¦ã®ãƒ†ã‚¹ãƒˆçµæžœã¯ã€ãƒ­ãƒ¼ã‚«ãƒ«ãƒžã‚·ãƒ³ã®ã‚¿ãƒ¼ãƒŸãƒŠãƒ«ä¸Šã§è¡¨ç¤ºã•れã¾ã™ã€‚ç•°ãªã‚‹ Python ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚„インタープリターも指定ã§ãã¾ã™ã€‚ .. Installation of xdist plugin ------------------------------ xdist プラグインã®ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ« ------------------------------ .. Install the plugin with:: プラグインをインストールã—ã¾ã™:: easy_install pytest-xdist # ã¾ãŸã¯ pip install pytest-xdist .. or use the package in develop/in-place mode with a checkout of the `pytest-xdist repository`_ :: ã‚‚ã—ã㯠`pytest-xdist リãƒã‚¸ãƒˆãƒª`_ ã‹ã‚‰ãƒã‚§ãƒƒã‚¯ã‚¢ã‚¦ãƒˆã—㦠develop コマンドã§ãƒ‘ッケージを使ã„ã¾ã™:: python setup.py develop .. Usage examples --------------------- 使用例 ------ .. 
_`xdistcpu`: 複数㮠CPU を使ã£ãŸãƒ†ã‚¹ãƒˆå®Ÿè¡Œã®é«˜é€ŸåŒ– +++++++++++++++++++++++++++++++++++++ .. Speed up test runs by sending tests to multiple CPUs +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. To send tests to multiple CPUs, type:: 複数㮠CPU ã§ãƒ†ã‚¹ãƒˆã‚’行ã†ã«ã¯ã€æ¬¡ã®ã‚ˆã†ã«å®Ÿè¡Œã—ã¾ã™:: py.test -n NUM .. Especially for longer running tests or tests requiring a lot of I/O this can lead to considerable speed ups. 特ã«é•·æ™‚間実行ã•れるテストã€ã¾ãŸã¯ãŸãã•ã‚“ã® I/O ã‚’å¿…è¦ã¨ã™ã‚‹ãƒ†ã‚¹ãƒˆã®é€Ÿåº¦å‘上ãŒè¦‹è¾¼ã‚ã¾ã™ã€‚ .. Running tests in a Python subprocess +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Python ã®ã‚µãƒ–プロセスã§ãƒ†ã‚¹ãƒˆå®Ÿè¡Œ +++++++++++++++++++++++++++++++++ .. To instantiate a Python-2.4 subprocess and send tests to it, you may type:: Python 2.4 ã® subprocess をインスタンス化ã™ã‚‹ã«ã¯ã€ãã“ã«ãƒ†ã‚¹ãƒˆã‚’é€ã‚Šã€æ¬¡ã®ã‚ˆã†ã«å®Ÿè¡Œã—ã¾ã™:: py.test -d --tx popen//python=python2.4 .. This will start a subprocess which is run with the "python2.4" Python interpreter, found in your system binary lookup path. システムã®ãƒã‚¤ãƒŠãƒªæ¤œç´¢ãƒ‘スã§è¦‹ã¤ã‘㟠"Python 2.4" インタープリターã§å®Ÿè¡Œã™ã‚‹ã‚µãƒ–プロセスを開始ã—ã¾ã™ã€‚ .. If you prefix the --tx option value like this:: 次ã®ã‚ˆã†ã« --tx オプションã«å€¤ã‚’指定ã™ã‚‹å ´åˆ:: py.test -d --tx 3*popen//python=python2.4 .. then three subprocesses would be created and the tests will be distributed to three subprocesses and run simultanously. 3ã¤ã®ã‚µãƒ–プロセスãŒä½œæˆã•れã€ãã®3ã¤ã®ã‚µãƒ–プロセスã«ãƒ†ã‚¹ãƒˆãŒåˆ†æ•£ã•れã¦ä¸¦è¡Œã§å®Ÿè¡Œã•れã¾ã™ã€‚ .. _looponfailing: looponfailing モードã§ã®ãƒ†ã‚¹ãƒˆå®Ÿè¡Œ ++++++++++++++++++++++++++++++++++ .. Running tests in looponfailing mode +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. For refactoring a project with a medium or large test suite you can use the looponfailing mode. Simply add the ``--f`` option:: ä¸­è¦æ¨¡ã¾ãŸã¯å¤§è¦æ¨¡ã®ãƒ†ã‚¹ãƒˆã‚¹ã‚¤ãƒ¼ãƒˆã‚’ã‚‚ã¤ãƒ—ロジェクトをリファクタリングã™ã‚‹ãŸã‚ã«ã€looponfailing モードãŒä¾¿åˆ©ã§ã™ã€‚å˜ç´”ã« ``--f`` オプションを追加ã—ã¾ã™:: py.test -f .. 
and py.test will run your tests. Assuming you have failures it will then wait for file changes and re-run the failing test set. File changes are detected by looking at ``looponfailingroots`` root directories and all of their contents (recursively). If the default for this value does not work for you you can change it in your project by setting a configuration option:: py.test ã¯ãƒ†ã‚¹ãƒˆã‚’実行ã—ã¾ã™ã€‚ã“ã“ã§å®Ÿè¡Œã—ãŸãƒ†ã‚¹ãƒˆã«å¤±æ•—ã™ã‚‹ã¨ä»®å®šã—ã¦ã€ãƒ•ã‚¡ã‚¤ãƒ«ã®æ›´æ–°ã‚’å¾…ã£ã¦ã‹ã‚‰ãã®å¤±æ•—ã—ãŸãƒ†ã‚¹ãƒˆã‚’å†å®Ÿè¡Œã—ã¾ã™ã€‚ãƒ•ã‚¡ã‚¤ãƒ«ã®æ›´æ–°ã¯ ``looponfailingroots`` ã¨ã„ã†ãƒ«ãƒ¼ãƒˆãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã¨ãã®å…¨ã¦ã®ã‚³ãƒ³ãƒ†ãƒ³ãƒ„ã‚’ (å†å¸°çš„ã«) 調ã¹ã‚‹ã“ã¨ã§æ¤œå‡ºã•れã¾ã™ã€‚ã“ã®ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆå€¤ãŒæ„図ã—ãŸã‚ˆã†ã«ä½œç”¨ã—ãªã„ãªã‚‰ã€è¨­å®šã‚ªãƒ—ションを追加ã™ã‚‹ã“ã¨ã§è‡ªåˆ†ã®ãƒ—ロジェクトå‘ã‘ã®å€¤ã‚’変更ã§ãã¾ã™:: # pytest.ini, setup.cfg ã¾ãŸã¯ tox.ini ファイルã®å†…容 [pytest] looponfailroots = mypkg testdir .. This would lead to only looking for file changes in the respective directories, specified relatively to the ini-file's directory. ã“ã®è¨­å®šã¯ ini ファイルãŒã‚るディレクトリã‹ã‚‰ã¿ã¦ã€ç›¸å¯¾ãƒ‘ã‚¹ã§æŒ‡å®šã—ãŸãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã®ãƒ•ã‚¡ã‚¤ãƒ«ã®æ›´æ–°ã®ã¿ã‚’調ã¹ã‚‹ã‚ˆã†ã«ã—ã¾ã™ã€‚ .. Sending tests to remote SSH accounts +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ リモート㮠SSH アカウントã¸ãƒ†ã‚¹ãƒˆã‚’é€ä¿¡ +++++++++++++++++++++++++++++++++++++++ .. Suppose you have a package ``mypkg`` which contains some tests that you can successfully run locally. And you also have a ssh-reachable machine ``myhost``. Then you can ad-hoc distribute your tests by typing:: ãƒ­ãƒ¼ã‚«ãƒ«ã§æ­£å¸¸ã«å®Ÿè¡Œã§ãるテストをå«ã‚€ ``mypkg`` ã¨ã€ssh ã§ãƒ­ã‚°ã‚¤ãƒ³ã§ãるマシン ``myhost`` ã‚‚ã‚ã‚‹ã¨ä»®å®šã—ã¾ã™ã€‚次ã®ã‚ˆã†ã«å®Ÿè¡Œã—ã¦ã‚¢ãƒ‰ãƒ›ãƒƒã‚¯ã«åˆ†æ•£ãƒ†ã‚¹ãƒˆã‚’実行ã§ãã¾ã™:: py.test -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg .. This will synchronize your ``mypkg`` package directory with a remote ssh account and then collect and run your tests at the remote side. 
ã“れã¯ãƒªãƒ¢ãƒ¼ãƒˆã® ssh アカウント㧠``mypkg`` パッケージã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’åŒæœŸã•ã›ã€ãƒªãƒ¢ãƒ¼ãƒˆãƒžã‚·ãƒ³ä¸Šã§ãƒ†ã‚¹ãƒˆã‚’実行ã—ã¾ã™ã€‚ .. You can specify multiple ``--rsyncdir`` directories to be sent to the remote side. リモートマシンã¸é€ã‚‹ãŸã‚㫠複数㮠``--rsyncdir`` ディレクトリを指定ã§ãã¾ã™ã€‚ .. XXX CHECK **NOTE:** For py.test to collect and send tests correctly you not only need to make sure all code and tests directories are rsynced, but that any test (sub) directory also has an ``__init__.py`` file because internally py.test references tests as a fully qualified python module path. **You will otherwise get strange errors** during setup of the remote side. .. Sending tests to remote Socket Servers +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ リモートã®ã‚½ã‚±ãƒƒãƒˆã‚µãƒ¼ãƒãƒ¼ã¸ãƒ†ã‚¹ãƒˆã‚’é€ä¿¡ ++++++++++++++++++++++++++++++++++++++++ .. Download the single-module `socketserver.py`_ Python program and run it like this:: `socketserver.py`_ ã¨ã„ã†å˜ä¸€ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã® Python プログラムをダウンロードã—ã€æ¬¡ã®ã‚ˆã†ã«å®Ÿè¡Œã—ã¦ãã ã•ã„:: python socketserver.py .. It will tell you that it starts listening on the default port. You can now on your home machine specify this new socket host with something like this:: ã“ã®ãƒ—ログラムã¯ãƒ‡ãƒ•ォルトã®ãƒãƒ¼ãƒˆã§å¾…機状態を起動ã—ã¾ã™ã€‚自宅ã®ãƒžã‚·ãƒ³ä¸Šã§ã€ä½•ã‹ã®ãƒ‘ッケージã¨ä¸€ç·’ã«ã“ã®æ–°ãŸãªã‚½ã‚±ãƒƒãƒˆãƒ›ã‚¹ãƒˆã‚’指定ã§ãã¾ã™:: py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg .. _`atonce`: 多ãã®ãƒ—ラットフォーム上ã§åŒæ™‚ã«ãƒ†ã‚¹ãƒˆã‚’実行 ++++++++++++++++++++++++++++++++++++++++++++ .. Running tests on many platforms at once +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. The basic command to run tests on multiple platforms is:: 複数ã®ãƒ—ラットフォームã§ãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹åŸºæœ¬çš„ãªã‚³ãƒžãƒ³ãƒ‰ã§ã™:: py.test --dist=each --tx=spec1 --tx=spec2 .. If you specify a windows host, an OSX host and a Linux environment this command will send each tests to all platforms - and report back failures from all platforms at once. 
The specifications strings use the `xspec syntax`_. Windows ホストã€OSX ホストã€Linux 環境を指定ã™ã‚‹å ´åˆã€ã“ã®ã‚³ãƒžãƒ³ãƒ‰ã¯ãれãžã‚Œã®ãƒ†ã‚¹ãƒˆã‚’å…¨ã¦ã®ãƒ—ラットフォームã«é€ä¿¡ã—ã¾ã™ã€‚ãã—ã¦ã€å…¨ã¦ã®ãƒ—ラットフォームã‹ã‚‰åŒæ™‚ã«å¤±æ•—レãƒãƒ¼ãƒˆã‚’å—ã‘å–りã¾ã™ã€‚ã“ã®è¨­å®šã«ã¯ `xspec æ§‹æ–‡`_ を使ã„ã¾ã™ã€‚ .. _`xspec syntax`: http://codespeak.net/execnet/basics.html#xspec .. _`xspec æ§‹æ–‡`: http://codespeak.net/execnet/basics.html#xspec .. _`socketserver.py`: http://bitbucket.org/hpk42/execnet/raw/2af991418160/execnet/script/socketserver.py .. _`execnet`: http://codespeak.net/execnet .. Specifying test exec environments in an ini file +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Ini ファイルã§ãƒ†ã‚¹ãƒˆå®Ÿè¡Œç’°å¢ƒã®æŒ‡å®š ++++++++++++++++++++++++++++++++++ .. pytest (since version 2.0) supports ini-style configuration. For example, you could make running with three subprocesses your default:: pytest (ãƒãƒ¼ã‚¸ãƒ§ãƒ³ 2.0) 㯠ini スタイルã®è¨­å®šãƒ•ァイルã«å¯¾å¿œã—ã¦ã„ã¾ã™ã€‚例ãˆã°ã€ãƒ‡ãƒ•ォルトã§3ã¤ã®ã‚µãƒ–プロセスを使ã£ã¦ãƒ†ã‚¹ãƒˆã‚’実行ã•ã›ã¾ã™:: [pytest] addopts = -n3 .. You can also add default environments like this:: 次ã®ã‚ˆã†ã«ã—ã¦ãƒ‡ãƒ•ォルト環境も追加ã§ãã¾ã™:: [pytest] addopts = --tx ssh=myhost//python=python2.5 --tx ssh=myhost//python=python2.6 .. and then just type:: ãã—ã¦ã€æ¬¡ã®ã‚ˆã†ã«å…¥åŠ›ã—ã¦å®Ÿè¡Œã™ã‚‹ã¨:: py.test --dist=each .. to run tests in each of the environments. ãれãžã‚Œã®ç’°å¢ƒã§ãƒ†ã‚¹ãƒˆãŒå®Ÿè¡Œã•れã¾ã™ã€‚ .. Specifying "rsync" dirs in an ini-file +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ini ファイル㧠"rsync" ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã®æŒ‡å®š +++++++++++++++++++++++++++++++++++++++++ .. In a ``tox.ini`` or ``setup.cfg`` file in your root project directory you may specify directories to include or to exclude in synchronisation:: プロジェクトã®ãƒ«ãƒ¼ãƒˆãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã« ``tox.ini`` ã‹ ``setup.cfg`` ファイルを置ãã€åŒæœŸã«å«ã‚ã‚‹ã€ã¾ãŸã¯é™¤å¤–ã™ã‚‹ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’指定ã™ã‚‹ã“ã¨ã‚‚ã§ãã¾ã™:: [pytest] rsyncdirs = . mypkg helperpkg rsyncignore = .hg .. 
These directory specifications are relative to the directory where the configuration file was found. ã“ã“ã§è¨­å®šã•れるディレクトリã¯ã€è¨­å®šãƒ•ァイルã®å ´æ‰€ã‹ã‚‰ç›¸å¯¾ãƒ‘スã§ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’指定ã—ã¾ã™ã€‚ .. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist .. _`pytest-xdist repository`: http://bitbucket.org/hpk42/pytest-xdist .. _`pytest-xdist リãƒã‚¸ãƒˆãƒª`: http://bitbucket.org/hpk42/pytest-xdist .. _`pytest`: http://pytest.org pytest-2.5.1/doc/ja/develop.txt0000664000175000017500000000406512254002202015767 0ustar hpkhpk00000000000000.. ================================================= Feedback and contribute to py.test ================================================= ================================= フィードãƒãƒƒã‚¯ã‚„ py.test ã¸ã®è²¢çŒ® ================================= .. toctree:: :maxdepth: 2 contact.txt .. _checkout: ãƒãƒ¼ã‚¸ãƒ§ãƒ³ç®¡ç†ã‚„ tarball を使ã£ãŸä½œæ¥­ ===================================== .. Working from version control or a tarball ================================================= .. To follow development or start experiments, checkout the complete code and documentation source with mercurial_:: 開発を追ã„ã‹ã‘ãŸã‚Šå®Ÿé¨“ã—ãŸã‚Šã™ã‚‹ã«ã¯ã€ mercurial_ ã§ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã‚„プログラムã®ã‚½ãƒ¼ã‚¹ã‚’ãƒã‚§ãƒƒã‚¯ã‚¢ã‚¦ãƒˆã—ã¦ãã ã•ã„:: hg clone https://bitbucket.org/hpk42/pytest/ .. You can also go to the python package index and download and unpack a TAR file:: ã¾ãŸã¯ Python パッケージインデックスã‹ã‚‰ TAR ファイルをダウンロードã—ã¦è§£å‡ã—ã¾ã™:: http://pypi.python.org/pypi/pytest/ .. Activating a checkout with setuptools -------------------------------------------- setuptools ã§ãƒã‚§ãƒƒã‚¯ã‚¢ã‚¦ãƒˆã—ãŸã‚‚ã®ã‚’有効ã«ã™ã‚‹ ----------------------------------------------- .. With a working Distribute_ or setuptools_ installation you can type:: distribute_ ã¾ãŸã¯ setuptools_ を使ã†ã¨ã€æ¬¡ã®ã‚ˆã†ã«ã—ã¦ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã§ãã¾ã™:: python setup.py develop .. in order to work inline with the tools and the lib of your checkout. ãƒã‚§ãƒƒã‚¯ã‚¢ã‚¦ãƒˆã—ãŸãƒ„ールやライブラリを使ã„ã¾ã™ã€‚ .. 
If this command complains that it could not find the required version of "py" then you need to use the development pypi repository:: ã“ã®ã‚³ãƒžãƒ³ãƒ‰ãŒ "py" ã®å¿…è¦ãªãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’発見ã§ããªã‹ã£ãŸã¨ã‚¨ãƒ©ãƒ¼ã‚’発生ã•ã›ã‚‹å ´åˆã€é–‹ç™ºç‰ˆã® pypi リãƒã‚¸ãƒˆãƒªã‚’使ã†å¿…è¦ãŒã‚りã¾ã™:: python setup.py develop -i http://pypi.testrun.org .. include:: links.inc pytest-2.5.1/doc/ja/mark.txt0000664000175000017500000000352512254002202015263 0ustar hpkhpk00000000000000 .. _mark: 属性をもã¤ãƒ†ã‚¹ãƒˆé–¢æ•°ã®ãƒžãƒ¼ã‚¯ ============================ .. Marking test functions with attributes ================================================================= .. currentmodule:: _pytest.mark .. By using the ``pytest.mark`` helper you can easily set metadata on your test functions. There are some builtin markers, for example: ``pytest.mark`` ヘルパーを使ã£ã¦ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã«ãƒ¡ã‚¿ãƒ‡ãƒ¼ã‚¿ã‚’ç°¡å˜ã«è¨­å®šã§ãã¾ã™ã€‚組ã¿è¾¼ã¿ã®ãƒžãƒ¼ã‚«ãƒ¼ã‚’紹介ã—ã¾ã™: .. * :ref:`skipif ` - skip a test function if a certain condition is met * :ref:`xfail ` - produce an "expected failure" outcome if a certain condition is met * :ref:`parametrize ` to perform multiple calls to the same test function. * :ref:`skipif `: ç‰¹å®šã®æ¡ä»¶ã‚’満ãŸã—ãŸå ´åˆã«ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’スキップ * :ref:`xfail `: ç‰¹å®šã®æ¡ä»¶ã‚’満ãŸã—ãŸå ´åˆã« "失敗を期待" * :ref:`parametrize `: åŒã˜ãƒ†ã‚¹ãƒˆé–¢æ•°ã«å¯¾ã—ã¦è¤‡æ•°å›žã®å‘¼ã³å‡ºã—を実行 .. It's easy to create custom markers or to apply markers to whole test classes or modules. See :ref:`mark examples` for examples which also serve as documentation. カスタムマーカーを作æˆã™ã‚‹ã€ã¾ãŸã¯å…¨ä½“ã®ãƒ†ã‚¹ãƒˆã‚¯ãƒ©ã‚¹ã‚„モジュールã«ãƒžãƒ¼ã‚«ãƒ¼ã‚’é©ç”¨ã™ã‚‹ã®ã¯ç°¡å˜ã§ã™ã€‚ドキュメントã§ã‚‚ã‚ã‚‹ :ref:`mark examples` ã®ã‚µãƒ³ãƒ—ルをå‚ç…§ã—ã¦ãã ã•ã„。 .. API reference for mark related objects ------------------------------------------------ マーカー関連オブジェクト㮠API リファレンス ------------------------------------------- .. autoclass:: MarkGenerator :members: .. autoclass:: MarkDecorator :members: .. 
autoclass:: MarkInfo :members: pytest-2.5.1/doc/ja/builtin.txt0000664000175000017500000001375512254002202016005 0ustar hpkhpk00000000000000 .. _`pytest helpers`: Pytest 組ã¿è¾¼ã¿ãƒ˜ãƒ«ãƒ‘ー機能 =========================== .. Pytest builtin helpers ================================================ .. builtin pytest.* functions and helping objects ----------------------------------------------------- 組ã¿è¾¼ã¿ã® pytest.* 関数ã¨ãƒ˜ãƒ«ãƒ‘ーオブジェクト ---------------------------------------------- .. You can always use an interactive Python prompt and type:: Python インタープリターã®å¯¾è©±ãƒ¢ãƒ¼ãƒ‰ã‹ã‚‰æ¬¡ã®ã‚ˆã†ã«å…¥åŠ›ã™ã‚‹ã¨:: import pytest help(pytest) .. to get an overview on the globally available helpers. グローãƒãƒ«ã«åˆ©ç”¨ã§ãã‚‹ãƒ˜ãƒ«ãƒ‘ãƒ¼æ©Ÿèƒ½ã®æ¦‚è¦ã‚’把æ¡ã§ãã¾ã™ã€‚ .. automodule:: pytest :members: .. _builtinfuncargs: 組ã¿è¾¼ã¿é–¢æ•°ã®å¼•æ•° ------------------ .. Builtin function arguments ----------------------------------------------------- .. You can ask for available builtin or project-custom :ref:`function arguments ` by typing:: 次ã®ã‚ˆã†ã«å…¥åŠ›ã—ã¦ã€åˆ©ç”¨ã§ãる組ã¿è¾¼ã¿ã¾ãŸã¯ãƒ—ロジェクトカスタム㮠:ref:`関数ã®å¼•æ•° ` を確èªã§ãã¾ã™ã€‚ | $ py.test --fixtures | ====================== test session starts ======================= | platform linux2 -- Python 2.7.1 -- pytest-2.2.4 | collected 0 items | pytestconfig | pytest ã® config オブジェクトã¨ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションã¸ã®ã‚¢ã‚¯ã‚»ã‚¹ | | capsys | sys.stdout/sys.stderr ã¸ã®æ›¸ãè¾¼ã¿å†…容をå–å¾—ã§ãã‚‹ | キャプãƒãƒ£ã—ãŸå‡ºåŠ›å†…å®¹ã¯ ``(out, err)`` ã®ã‚¿ãƒ—ルを返㙠| ``capsys.readouterr()`` メソッドã§åˆ©ç”¨ã§ãã‚‹ | | capfd | ファイルディスクリプタ 1 㨠2 ã¸æ›¸ãè¾¼ã¿å†…容をå–å¾—ã§ãã‚‹ | キャプãƒãƒ£ã—ãŸå‡ºåŠ›å†…å®¹ã¯ ``(out, err)`` ã®ã‚¿ãƒ—ルを返㙠| ``capsys.readouterr()`` メソッドã§åˆ©ç”¨ã§ãã‚‹ | | tmpdir | 基本ã¨ãªã‚‹ä¸€æ™‚ディレクトリé…下ã«ã‚µãƒ–ディレクトリを作æˆã—ã¦ã€ | テスト関数ã®å®Ÿè¡Œæ¯Žã«ä¸€æ„ãªä¸€æ™‚ディレクトリã®ã‚ªãƒ–ジェクトを返㙠| ã“れ㯠py.path.local ã®ãƒ‘スオブジェクトãŒè¿”ã•れる | | monkeypatch | オブジェクトã€ãƒ‡ã‚£ã‚¯ã‚·ãƒ§ãƒŠãƒªã€os.environ を変更ã™ã‚‹ | 次ã®ãƒ˜ãƒ«ãƒ‘ーメソッドをæä¾›ã™ã‚‹ ``monkeypatch`` オブジェクトãŒè¿”ã•れる | | 
monkeypatch.setattr(obj, name, value, raising=True) | monkeypatch.delattr(obj, name, raising=True) | monkeypatch.setitem(mapping, name, value) | monkeypatch.delitem(obj, name, raising=True) | monkeypatch.setenv(name, value, prepend=False) | monkeypatch.delenv(name, value, raising=True) | monkeypatch.syspath_prepend(path) | monkeypatch.chdir(path) | | å…¨ã¦ã®å¤‰æ›´ã¯ãƒ†ã‚¹ãƒˆé–¢æ•°ã®å‘¼ã³å‡ºã—ãŒçµ‚ã‚ã£ãŸå¾Œã§å…ƒã«æˆ»ã‚Šã¾ã™ | ``raising`` パラメーターã¯ã€ã‚»ãƒƒãƒˆï¼å‰Šé™¤ã®æ“作対象ãŒãªã„ã¨ãã« | KeyError ã‚„ AttributeError を発生ã•ã›ã‚‹ã‹ã©ã†ã‹ã‚’決ã‚ã¾ã™ | | recwarn | 次ã®ãƒ¡ã‚½ãƒƒãƒ‰ã‚’æä¾›ã™ã‚‹ WarningsRecorder インスタンスを返㙠| | * ``pop(category=None)``: category ã«ä¸€è‡´ã™ã‚‹æœ€å¾Œã®è­¦å‘Šã‚’返㙠| * ``clear()``: 警告ã®ãƒªã‚¹ãƒˆã‚’削除ã™ã‚‹ | | 警告ã«ã¤ã„ã¦ã¯ http://docs.python.org/library/warnings.html ã‚’ | å‚ç…§ã—ã¦ãã ã•ã„ | | ======================== in 0.00 seconds ======================== .. $ py.test --fixtures =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collected 0 items pytestconfig the pytest config object with access to command line opts. capsys enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. capfd enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. tmpdir return a temporary directory path object which is unique to each test function invocation, created as a sub directory of the base temporary directory. The returned object is a `py.path.local`_ path object. 
monkeypatch The returned ``monkeypatch`` funcarg provides these helper methods to modify objects, dictionaries or os.environ:: monkeypatch.setattr(obj, name, value, raising=True) monkeypatch.delattr(obj, name, raising=True) monkeypatch.setitem(mapping, name, value) monkeypatch.delitem(obj, name, raising=True) monkeypatch.setenv(name, value, prepend=False) monkeypatch.delenv(name, value, raising=True) monkeypatch.syspath_prepend(path) monkeypatch.chdir(path) All modifications will be undone after the requesting test function has finished. The ``raising`` parameter determines if a KeyError or AttributeError will be raised if the set/deletion operation has no target. recwarn Return a WarningsRecorder instance that provides these methods: * ``pop(category=None)``: return last warning matching the category. * ``clear()``: clear list of warnings See http://docs.python.org/library/warnings.html for information on warning categories. ============================= in 0.00 seconds ============================= pytest-2.5.1/doc/ja/unittest.txt0000664000175000017500000000424212254002202016205 0ustar hpkhpk00000000000000 .. _`unittest.TestCase`: unittest.TestCase ã®å¯¾å¿œ ======================== .. Support for unittest.TestCase ===================================================================== .. py.test has limited support for running Python `unittest.py style`_ tests. It will automatically collect ``unittest.TestCase`` subclasses and their ``test`` methods in test files. 
It will invoke ``setUp/tearDown`` methods but also perform py.test's standard ways of treating tests such as IO capturing:: py.test ã¯ã€Python ã® `unittest スタイル`_ ã®ãƒ†ã‚¹ãƒˆã«åˆ¶é™ä»˜ãã§å¯¾å¿œã—ã¦ã„ã¾ã™ã€‚テストファイル内㮠``unittest.TestCase`` ã®ã‚µãƒ–クラスã¨ãã® ``test`` ãƒ¡ã‚½ãƒƒãƒ‰ã‚’è‡ªå‹•çš„ã«æŽ¢ã—ã¾ã™ã€‚ ``setUp/tearDown`` メソッドを実行ã—ã¾ã™ãŒã€IO キャプãƒãƒ£ã®ã‚ˆã†ãªãƒ†ã‚¹ãƒˆã®æ‰±ã„㯠pytest ã®æ¨™æº–çš„ãªæ–¹æ³•ã§è¡Œã„ã¾ã™:: # test_unittest.py ã®å†…容 import unittest class MyTest(unittest.TestCase): def setUp(self): print ("hello") # 出力内容をキャプãƒãƒ£ def test_method(self): x = 1 self.assertEquals(x, 3) .. Running it yields:: ã“ã®ã‚³ãƒ¼ãƒ‰ã‚’実行ã™ã‚‹ã¨æ¬¡ã®ã‚ˆã†ã«ãªã‚Šã¾ã™:: $ py.test test_unittest.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 1 items test_unittest.py F ================================= FAILURES ================================= ____________________________ MyTest.test_method ____________________________ self = def test_method(self): x = 1 > self.assertEquals(x, 3) E AssertionError: 1 != 3 test_unittest.py:8: AssertionError ----------------------------- Captured stdout ------------------------------ hello ========================= 1 failed in 0.01 seconds ========================= .. _`unittest.py style`: http://docs.python.org/library/unittest.html .. _`unittest スタイル`: http://docs.python.org/library/unittest.html pytest-2.5.1/doc/ja/talks.txt0000664000175000017500000001246412254002202015451 0ustar hpkhpk00000000000000 .. Talks and Tutorials ========================== 講演ã¨ãƒãƒ¥ãƒ¼ãƒˆãƒªã‚¢ãƒ« ==================== .. _`funcargs`: funcargs.html ãƒãƒ¥ãƒ¼ãƒˆãƒªã‚¢ãƒ«ã‚„ブログ記事 -------------------------- .. Tutorial examples and blog postings --------------------------------------------- .. _`tutorial1 repository`: http://bitbucket.org/hpk42/pytest-tutorial1/ .. _`pycon 2010 tutorial PDF`: http://bitbucket.org/hpk42/pytest-tutorial1/raw/tip/pytest-basic.pdf .. 
Basic usage and funcargs: 基本的ãªä½¿ç”¨æ–¹æ³•ã¨é–¢æ•°ã®å¼•æ•° (funcarg): .. - `pycon 2010 tutorial PDF`_ and `tutorial1 repository`_ - `pycon 2010 tutorial PDF`_ 㨠`tutorial1 repository`_ .. Function arguments: 関数ã®å¼•æ•°: - :ref:`mysetup` - `application setup in test functions with funcargs`_ .. - `monkey patching done right`_ (blog post, consult `monkeypatch plugin`_ for actual 1.0 API) - `monkey patching done right`_ (ブログ記事ã€å®Ÿéš›ã® 1.0 API 㯠`monkeypatch plugin`_ ã‚’å‚ç…§) .. Test parametrization: パラメーターテスト: .. - `generating parametrized tests with funcargs`_ (uses deprecated ``addcall()`` API. - `test generators and cached setup`_ - `parametrizing tests, generalized`_ (blog post) - `putting test-hooks into local or global plugins`_ (blog post) - `generating parametrized tests with funcargs`_ ï¼ˆéžæŽ¨å¥¨ ``addcall()`` API を使用) - `test generators and cached setup`_ - `parametrizing tests, generalized`_ (ブログ記事) - `putting test-hooks into local or global plugins`_ (ブログ記事) .. Assertion introspection: アサートイントロスペクション: - `(07/2011) Behind the scenes of py.test's new assertion rewriting `_ .. Distributed testing: 分散テスト: .. - `simultaneously test your code on all platforms`_ (blog entry) - `simultaneously test your code on all platforms`_ (ブログ記事) .. Plugin specific examples: プラグインã«ç‰¹åŒ–ã—ãŸä¾‹: .. - `skipping slow tests by default in py.test`_ (blog entry) - `skipping slow tests by default in py.test`_ (ブログ記事) - `many examples in the docs for plugins`_ .. _`skipping slow tests by default in py.test`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html .. _`many examples in the docs for plugins`: plugin/index.html .. _`monkeypatch plugin`: plugin/monkeypatch.html .. _`application setup in test functions with funcargs`: funcargs.html#appsetup .. _`simultaneously test your code on all platforms`: http://tetamap.wordpress.com/2009/03/23/new-simultanously-test-your-code-on-all-platforms/ .. 
_`monkey patching done right`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/ .. _`putting test-hooks into local or global plugins`: http://tetamap.wordpress.com/2009/05/14/putting-test-hooks-into-local-and-global-plugins/ .. _`parametrizing tests, generalized`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/ .. _`generating parametrized tests with funcargs`: funcargs.html#test-generators .. _`test generators and cached setup`: http://bruynooghe.blogspot.com/2010/06/pytest-test-generators-and-cached-setup.html .. Conference talks and tutorials ---------------------------------------- カンファレンス講演ã¨ãƒãƒ¥ãƒ¼ãƒˆãƒªã‚¢ãƒ« ---------------------------------- .. - `ep2009-rapidtesting.pdf`_ tutorial slides (July 2009): - testing terminology - basic py.test usage, file system layout - test function arguments (funcargs_) and test fixtures - existing plugins - distributed testing - `ep2009-rapidtesting.pdf`_ ãƒãƒ¥ãƒ¼ãƒˆãƒªã‚¢ãƒ«ã®ã‚¹ãƒ©ã‚¤ãƒ‰ (2009å¹´7月): - テストã®å°‚門用語 - 基本的㪠py.test 使用方法ã€ãƒ•ァイルシステムã®é…ç½® - テスト関数ã®å¼•æ•° (funcargs_) ã¨ãƒ†ã‚¹ãƒˆãƒ•ィクスãƒãƒ£ - 既存ã®ãƒ—ラグイン - 分散テスト .. - `ep2009-pytest.pdf`_ 60 minute py.test talk, highlighting unique features and a roadmap (July 2009) - `ep2009-pytest.pdf`_: 60分㮠py.test 講演ã€ç‹¬ç‰¹ã®æ©Ÿèƒ½ã‚„ロードマップを強調ã—ã¦èª¬æ˜Ž (2009å¹´7月) .. - `pycon2009-pytest-introduction.zip`_ slides and files, extended version of py.test basic introduction, discusses more options, also introduces old-style xUnit setup, looponfailing and other features. - `pycon2009-pytest-introduction.zip`_: スライドã¨ãƒ•ァイルã€py.test å…¥é–€ã®æ‹¡å¼µãƒãƒ¼ã‚¸ãƒ§ãƒ³ã€ã‚ˆã‚Šå¤šãã®ã‚ªãƒ—ションã®è­°è«–ã€å¤ã„スタイル xUnit setup ã®å°Žå…¥ã€looponfailing ã¨ãã®ä»–ã®æ©Ÿèƒ½ .. - `pycon2009-pytest-advanced.pdf`_ contain a slightly older version of funcargs and distributed testing, compared to the EuroPython 2009 slides. 
- `pycon2009-pytest-advanced.pdf`_: EuroPython 2009 ã®ã‚¹ãƒ©ã‚¤ãƒ‰ã¨æ¯”較ã—ã¦ã€ã¡ã‚‡ã£ã¨å¤ã„ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã® funcargs や分散テストをå«ã‚€ .. _`ep2009-rapidtesting.pdf`: http://codespeak.net/download/py/ep2009-rapidtesting.pdf .. _`ep2009-pytest.pdf`: http://codespeak.net/download/py/ep2009-pytest.pdf .. _`pycon2009-pytest-introduction.zip`: http://codespeak.net/download/py/pycon2009-pytest-introduction.zip .. _`pycon2009-pytest-advanced.pdf`: http://codespeak.net/download/py/pycon2009-pytest-advanced.pdf pytest-2.5.1/doc/ja/check_sphinx.py0000664000175000017500000000072712254002202016611 0ustar hpkhpk00000000000000import py import subprocess def test_build_docs(tmpdir): doctrees = tmpdir.join("doctrees") htmldir = tmpdir.join("html") subprocess.check_call([ "sphinx-build", "-W", "-bhtml", "-d", str(doctrees), ".", str(htmldir)]) def test_linkcheck(tmpdir): doctrees = tmpdir.join("doctrees") htmldir = tmpdir.join("html") subprocess.check_call( ["sphinx-build", "-blinkcheck", "-d", str(doctrees), ".", str(htmldir)]) pytest-2.5.1/doc/ja/example/0000775000175000017500000000000012254002202015216 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/example/attic.txt0000664000175000017500000000516012254002202017065 0ustar hpkhpk00000000000000 .. _`accept example`: example: specifying and selecting acceptance tests -------------------------------------------------------------- .. sourcecode:: python # ./conftest.py def pytest_option(parser): group = parser.getgroup("myproject") group.addoption("-A", dest="acceptance", action="store_true", help="run (slow) acceptance tests") def pytest_funcarg__accept(request): return AcceptFixture(request) class AcceptFixture: def __init__(self, request): if not request.config.option.acceptance: pytest.skip("specify -A to run acceptance tests") self.tmpdir = request.config.mktemp(request.function.__name__, numbered=True) def run(self, cmd): """ called by test code to execute an acceptance test. 
""" self.tmpdir.chdir() return py.process.cmdexec(cmd) and the actual test function example: .. sourcecode:: python def test_some_acceptance_aspect(accept): accept.tmpdir.mkdir("somesub") result = accept.run("ls -la") assert "somesub" in result If you run this test without specifying a command line option the test will get skipped with an appropriate message. Otherwise you can start to add convenience and test support methods to your AcceptFixture and drive running of tools or applications and provide ways to do assertions about the output. .. _`decorate a funcarg`: example: decorating a funcarg in a test module -------------------------------------------------------------- For larger scale setups it's sometimes useful to decorate a funcarg just for a particular test module. We can extend the `accept example`_ by putting this in our test module: .. sourcecode:: python def pytest_funcarg__accept(request): # call the next factory (living in our conftest.py) arg = request.getfuncargvalue("accept") # create a special layout in our tempdir arg.tmpdir.mkdir("special") return arg class TestSpecialAcceptance: def test_sometest(self, accept): assert accept.tmpdir.join("special").check() Our module level factory will be invoked first and it can ask its request object to call the next factory and then decorate its result. This mechanism allows us to stay ignorant of how/where the function argument is provided - in our example from a `conftest plugin`_. sidenote: the temporary directory used here are instances of the `py.path.local`_ class which provides many of the os.path methods in a convenient way. .. _`py.path.local`: ../path.html#local .. _`conftest plugin`: customize.html#conftestplugin pytest-2.5.1/doc/ja/example/index.txt0000664000175000017500000000176412254002202017076 0ustar hpkhpk00000000000000 .. _examples: 使用方法ã¨ä¾‹ ============ .. Usages and Examples =========================================== .. Here is a (growing) list of examples. 
:ref:`Contact ` us if you need more examples or have questions. Also take a look at the :ref:`comprehensive documentation ` which contains many example snippets as well. 次㫠pytest ã®ä½¿ã„æ–¹ã®ã‚µãƒ³ãƒ—ルリストãŒã‚りã¾ã™ (ã•らã«å¢—ã‚„ã—ç¶šã‘ã¾ã™) 。もã£ã¨è‰¯ã„サンプルや質å•ãŒã‚れ㰠:ref:`ã”連絡ãã ã•ã„ ` 。ã¾ãŸ :ref:`オンラインドキュメント全体 ` を見渡ã—ã¦ã‚‚多ãã®ã‚µãƒ³ãƒ—ルやスニペットãŒã‚りã¾ã™ã€‚ .. see :doc:`../getting-started` for basic introductory examples .. note:: 基本的ãªä½¿ã„方㯠:doc:`../getting-started` ã‚’å‚ç…§ã—ã¦ãã ã•ã„ .. toctree:: :maxdepth: 2 reportingdemo.txt simple.txt mysetup.txt parametrize.txt markers.txt pythoncollection.txt nonpython.txt pytest-2.5.1/doc/ja/example/costlysetup/0000775000175000017500000000000012254002202017614 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/example/costlysetup/conftest.py0000664000175000017500000000063012254002202022012 0ustar hpkhpk00000000000000 def pytest_funcarg__setup(request): return request.cached_setup( setup=lambda: CostlySetup(), teardown=lambda costlysetup: costlysetup.finalize(), scope="session", ) class CostlySetup: def __init__(self): import time print ("performing costly setup") time.sleep(5) self.timecostly = 1 def finalize(self): del self.timecostly pytest-2.5.1/doc/ja/example/costlysetup/sub1/0000775000175000017500000000000012254002202020466 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/example/costlysetup/sub1/test_quick.py0000664000175000017500000000004112254002202023206 0ustar hpkhpk00000000000000 def test_quick(setup): pass pytest-2.5.1/doc/ja/example/costlysetup/sub1/__init__.py0000664000175000017500000000000212254002202022567 0ustar hpkhpk00000000000000# pytest-2.5.1/doc/ja/example/costlysetup/sub2/0000775000175000017500000000000012254002202020467 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/example/costlysetup/sub2/__init__.py0000664000175000017500000000000212254002202022570 0ustar hpkhpk00000000000000# pytest-2.5.1/doc/ja/example/costlysetup/sub2/test_two.py0000664000175000017500000000017712254002202022716 0ustar 
hpkhpk00000000000000def test_something(setup): assert setup.timecostly == 1 def test_something_more(setup): assert setup.timecostly == 1 pytest-2.5.1/doc/ja/example/parametrize.txt0000664000175000017500000004163612254002202020314 0ustar hpkhpk00000000000000 .. _paramexamples: パラメーターテスト ================== .. Parametrizing tests ================================================= .. currentmodule:: _pytest.python py.test ã¯ã€ç°¡å˜ã«ãƒ‘ãƒ©ãƒ¡ãƒ¼ã‚¿ãƒ¼ã‚’ãƒ†ã‚¹ãƒˆé–¢æ•°ã¸æ¸¡ã›ã¾ã™ã€‚パラメーターテストを行ã†ãŸã‚ã®çµ„ã¿è¾¼ã¿ã®ä»•組ã¿ã‚’使ã£ãŸã‚µãƒ³ãƒ—ルを紹介ã—ã¾ã™ã€‚ .. py.test allows to easily parametrize test functions. In the following we provide some examples using the builtin mechanisms. .. _parametrizemark: シンプル㪠"デコレーター" ã«ã‚ˆã‚‹ãƒ‘ラメーターテスト -------------------------------------------------- .. Simple "decorator" parametrization of a test function ---------------------------------------------------------------------------- .. versionadded:: 2.2 .. The builtin ``pytest.mark.parametrize`` decorator directly enables parametrization of arguments for a test function. Here is an example of a test function that wants to compare that processing some input results in expected output:: 組ã¿è¾¼ã¿ã® ``pytest.mark.parametrize`` デコレーターã¯ã€ç›´æŽ¥ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã®å¼•æ•°ã¸ãƒ‘ラメーターを渡ã›ã¾ã™ã€‚入力値を処ç†ã—ã¦ã€ãã®çµæžœã¨ã—ã¦æœŸå¾…ã•れる出力値を比較ã—ãŸã„テスト関数ã®ã‚µãƒ³ãƒ—ルを紹介ã—ã¾ã™:: # test_expectation.py ã®å†…容 import pytest @pytest.mark.parametrize(("input", "expected"), [ ("3+5", 8), ("2+4", 6), ("6*9", 42), ]) def test_eval(input, expected): assert eval(input) == expected .. we parametrize two arguments of the test function so that the test function is called three times. Let's run it:: テスト関数ãŒ3回呼ã³å‡ºã•れã€ãã®ãƒ†ã‚¹ãƒˆé–¢æ•°ã¸2ã¤ã®å¼•数をパラメーターã¨ã—ã¦æ¸¡ã—ã¾ã™ã€‚実行ã—ã¦ã¿ã¾ã—ょã†:: $ py.test -q collecting ... 
collected 3 items ..F ================================= FAILURES ================================= ____________________________ test_eval[6*9-42] _____________________________ input = '6*9', expected = 42 @pytest.mark.parametrize(("input", "expected"), [ ("3+5", 8), ("2+4", 6), ("6*9", 42), ]) def test_eval(input, expected): > assert eval(input) == expected E assert 54 == 42 E + where 54 = eval('6*9') test_expectation.py:8: AssertionError 1 failed, 2 passed in 0.01 seconds .. As expected only one pair of input/output values fails the simple test function. 期待ã—ãŸé€šã‚Šã€å…¥åЛ値ï¼å‡ºåЛ値ã®çµ„ã¿åˆã‚ã›ã®1ã¤ã ã‘ãŒã“ã®å˜ç´”ãªãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’失敗ã•ã›ã¾ã™ã€‚ .. Note that there are various ways how you can mark groups of functions, see :ref:`mark`. 関数ã®ã‚°ãƒ«ãƒ¼ãƒ—をマークã™ã‚‹æ–¹æ³•ã¯æ§˜ã€…ãªã‚„り方ãŒã‚ã‚‹ã®ã«æ³¨æ„ã—ã¦ãã ã•ã„。詳細㯠:ref:`mark` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. Generating parameters combinations, depending on command line ---------------------------------------------------------------------------- コマンドラインã‹ã‚‰ãƒ‘ラメーターã®çµ„ã¿åˆã‚ã›ã‚’ä½œæˆ ------------------------------------------------ .. regendoc:wipe .. Let's say we want to execute a test with different computation parameters and the parameter range shall be determined by a command line argument. Let's first write a simple (do-nothing) computation test:: 別ã®ãƒ‘ラメーターã§ãƒ†ã‚¹ãƒˆã‚’実行ã—ãŸã„ã¨ãã«ã€ãã®ãƒ‘ラメーターã®ç¯„囲ã¯ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³å¼•æ•°ã«ã‚ˆã£ã¦æ±ºã¾ã‚‹ã‚‚ã®ã¨ã—ã¾ã—ょã†ã€‚最åˆã®ç°¡å˜ãª (何もã—ãªã„) テストを書ã„ã¦ã¿ã¾ã™:: # test_compute.py ã®å†…容 def test_compute(param1): assert param1 < 4 .. Now we add a test configuration like this:: 次ã®ã‚ˆã†ãªãƒ†ã‚¹ãƒˆè¨­å®šã‚’追加ã—ã¾ã™:: # conftest.py ã®å†…容 def pytest_addoption(parser): parser.addoption("--all", action="store_true", help="run all combinations") def pytest_generate_tests(metafunc): if 'param1' in metafunc.fixturenames: if metafunc.config.option.all: end = 5 else: end = 2 metafunc.parametrize("param1", range(end)) .. 
This means that we only run 2 tests if we do not pass ``--all``:: ã“れ㯠``--all`` を指定ã—ãªã„å ´åˆã€2回ã ã‘テストを実行ã—ã¾ã™:: $ py.test -q test_compute.py collecting ... collected 2 items .. 2 passed in 0.01 seconds .. We run only two computations, so we see two dots. let's run the full monty:: 2回ã ã‘テストを実行ã™ã‚‹ã®ã§ã€ãƒ‰ãƒƒãƒˆãŒ2ã¤è¡¨ç¤ºã•れã¾ã™ã€‚ã§ã¯ã€å…¨ãƒ†ã‚¹ãƒˆã‚’実行ã—ã¦ã¿ã¾ã—ょã†:: $ py.test -q --all collecting ... collected 5 items ....F ================================= FAILURES ================================= _____________________________ test_compute[4] ______________________________ param1 = 4 def test_compute(param1): > assert param1 < 4 E assert 4 < 4 test_compute.py:3: AssertionError 1 failed, 4 passed in 0.02 seconds .. As expected when running the full range of ``param1`` values we'll get an error on the last one. 期待ã—ãŸé€šã‚Š ``param1`` ã®å…¨ã¦ã®ç¯„囲値を実行ã™ã‚‹ã¨ã€æœ€å¾Œã®1ã¤ãŒã‚¨ãƒ©ãƒ¼ã«ãªã‚Šã¾ã™ã€‚ .. A quick port of "testscenarios" ------------------------------------ "testscenarios" ã®æ‰‹æ—©ã„移行 ---------------------------- .. _`test scenarios`: http://bazaar.launchpad.net/~lifeless/testscenarios/trunk/annotate/head%3A/doc/example.py .. Here is a quick port to run tests configured with `test scenarios`_, an add-on from Robert Collins for the standard unittest framework. 
We only have to work a bit to construct the correct arguments for pytest's :py:func:`Metafunc.parametrize`:: Robert Collins ã«ã‚ˆã‚‹æ¨™æº–ライブラリ㮠unittest フレームワークã®ã‚¢ãƒ‰ã‚ªãƒ³ã§ã‚ã‚‹ `test scenarios`_ ã§è¨­å®šã•れãŸãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹ãŸã‚ã«æ‰‹æ—©ã„移行方法を紹介ã—ã¾ã™ã€‚pytest ã® :py:func:`Metafunc.parametrize` ã¸æ¸¡ã™æ­£ã—ã„引数を作æˆã™ã‚‹ãŸã‚ã«å°‘ã—ã ã‘コーディングãŒå¿…è¦ã§ã™:: # test_scenarios.py ã®å†…容 def pytest_generate_tests(metafunc): idlist = [] argvalues = [] for scenario in metafunc.cls.scenarios: idlist.append(scenario[0]) items = scenario[1].items() argnames = [x[0] for x in items] argvalues.append(([x[1] for x in items])) metafunc.parametrize(argnames, argvalues, ids=idlist) scenario1 = ('basic', {'attribute': 'value'}) scenario2 = ('advanced', {'attribute': 'value2'}) class TestSampleWithScenarios: scenarios = [scenario1, scenario2] def test_demo(self, attribute): assert isinstance(attribute, str) .. this is a fully self-contained example which you can run with:: ã“れã¯ã™ã実行ã§ãる完全ãªè‡ªå·±å®Œçµåž‹ã‚µãƒ³ãƒ—ルã§ã™:: $ py.test test_scenarios.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 2 items test_scenarios.py .. ========================= 2 passed in 0.01 seconds ========================= .. If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: ãŸã ãƒ†ã‚¹ãƒˆã‚’ (実行ã›ãšã«) 集ã‚ã‚‹ã ã‘ãªã‚‰ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã®å¤‰æ•°ã¨ã—㦠'advanced' 㨠'basic' ã‚‚ã†ã¾ã表示ã•れã¾ã™:: $ py.test --collect-only test_scenarios.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 2 items ============================= in 0.00 seconds ============================= .. Deferring the setup of parametrized resources --------------------------------------------------- パラメーター化ã•れãŸãƒªã‚½ãƒ¼ã‚¹ã®é…延セットアップ ---------------------------------------------- .. 
regendoc:wipe .. The parametrization of test functions happens at collection time. It is a good idea to setup expensive resources like DB connections or subprocess only when the actual test is run. Here is a simple example how you can achieve that, first the actual test requiring a ``db`` object:: テスト関数ã¸ã®ãƒ‘ラメーター渡ã—ã¯ã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³æ™‚ã«ç™ºç”Ÿã—ã¾ã™ã€‚実際ã«ãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹ã¨ãã®ã¿ã€DB コãƒã‚¯ã‚·ãƒ§ãƒ³ã‚„サブプロセスã¨ã„ã£ãŸé«˜ä¾¡ãªãƒªã‚½ãƒ¼ã‚¹ã‚’セットアップã™ã‚‹ã®ã¯è‰¯ã„考ãˆã§ã™ã€‚ãã†ã„ã£ãŸãƒ†ã‚¹ãƒˆã‚’行ã†ç°¡å˜ãªã‚µãƒ³ãƒ—ãƒ«ãŒæ¬¡ã«ãªã‚Šã¾ã™ã€‚最åˆã®ãƒ†ã‚¹ãƒˆã¯ ``db`` ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆã‚’è¦æ±‚ã—ã¾ã™:: # test_backends.py ã®å†…容 import pytest def test_db_initialized(db): # ダミーテスト if db.__class__.__name__ == "DB2": pytest.fail("deliberately failing for demo purposes") .. We can now add a test configuration that generates two invocations of the ``test_db_initialized`` function and also implements a factory that creates a database object for the actual test invocations:: ``test_db_initialized`` 関数ã®2回実行ã™ã‚‹ã‚ˆã†ã«ãƒ†ã‚¹ãƒˆè¨­å®šã‚’追加ã—ã¾ã™ã€‚ã•らã«å®Ÿéš›ã®ãƒ†ã‚¹ãƒˆå®Ÿè¡Œæ™‚ã«ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã‚ªãƒ–ジェクトを作æˆã™ã‚‹ãƒ•ァクトリー関数も実装ã—ã¾ã™:: # conftest.py ã®å†…容 def pytest_generate_tests(metafunc): if 'db' in metafunc.fixturenames: metafunc.parametrize("db", ['d1', 'd2'], indirect=True) class DB1: "one database object" class DB2: "alternative database object" def pytest_funcarg__db(request): if request.param == "d1": return DB1() elif request.param == "d2": return DB2() else: raise ValueError("invalid internal test config") .. Let's first see how it looks like at collection time:: コレクション時ã«å…ˆã»ã©ã®è¨­å®šãŒã©ã†ãªã‚‹ã‹ã‚’最åˆã«è¦‹ã¦ã¿ã¾ã—ょã†:: $ py.test test_backends.py --collect-only =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 2 items ============================= in 0.00 seconds ============================= .. 
And then when we run the test:: ãれã‹ã‚‰ãƒ†ã‚¹ãƒˆã‚’実行ã—ã¾ã™:: $ py.test -q test_backends.py collecting ... collected 2 items .F ================================= FAILURES ================================= _________________________ test_db_initialized[d2] __________________________ db = def test_db_initialized(db): # ダミーテスト if db.__class__.__name__ == "DB2": > pytest.fail("deliberately failing for demo purposes") E Failed: deliberately failing for demo purposes test_backends.py:6: Failed 1 failed, 1 passed in 0.01 seconds .. The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``pytest_funcarg__db`` factory has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase. 最åˆã® ``db == "DB1"`` ã«ã‚ˆã‚‹å®Ÿè¡ŒãŒæˆåŠŸã—ãŸã®ã«å¯¾ã—ã¦ã€2番目㮠``db == "DB2"`` ã¯å¤±æ•—ã—ã¾ã—ãŸã€‚ ``pytest_funcarg__db`` ファクトリーã¯ã€ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—フェーズã®ã¨ãã«ãれãžã‚Œã® DB 値をインスタンス化ã—ã¾ã—ãŸã€‚一方 ``pytest_generate_tests`` ã¯ã€ã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ãƒ•ェーズã®ã¨ãã«2回㮠``test_db_initialized`` 呼ã³å‡ºã—を生æˆã—ã¾ã—ãŸã€‚ .. regendoc:wipe .. Parametrizing test methods through per-class configuration -------------------------------------------------------------- クラス設定毎ã®ãƒ†ã‚¹ãƒˆãƒ¡ã‚½ãƒƒãƒ‰ã®ãƒ‘ラメーター渡㗠---------------------------------------------- .. _`unittest parameterizer`: http://code.google.com/p/unittest-ext/source/browse/trunk/params.py .. 
Here is an example ``pytest_generate_function`` function implementing a parametrization scheme similar to Michael Foord's `unittest parameterizer`_ but in a lot less code:: Michael Foord ã® `unittest parameterizer`_ ã¨ã‚ˆãä¼¼ã¦ã„ã¾ã™ãŒã€ãれよりもãšã£ã¨å°‘ãªã„コードã§ãƒ‘ラメーターを渡ã™ä»•組ã¿ã‚’実装ã™ã‚‹ ``pytest_generate_function`` 関数ã®ã‚µãƒ³ãƒ—ルãŒã‚りã¾ã™:: # ./test_parametrize.py ã®å†…容 import pytest def pytest_generate_tests(metafunc): # ãれãžã‚Œã®ãƒ†ã‚¹ãƒˆé–¢æ•°æ¯Žã«1回呼ã³å‡ºã•れる funcarglist = metafunc.cls.params[metafunc.function.__name__] argnames = list(funcarglist[0]) metafunc.parametrize(argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]) class TestClass: # テストメソッドã®ãŸã‚ã«è¤‡æ•°ã®å¼•数セットを指定ã™ã‚‹ãƒ‡ã‚£ã‚¯ã‚·ãƒ§ãƒŠãƒª params = { 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ], 'test_zerodivision': [dict(a=1, b=0), ], } def test_equals(self, a, b): assert a == b def test_zerodivision(self, a, b): pytest.raises(ZeroDivisionError, "a/b") .. Our test generator looks up a class-level definition which specifies which argument sets to use for each test function. Let's run it:: テストジェãƒãƒ¬ãƒ¼ã‚¿ãƒ¼ã¯ã€ãれãžã‚Œã®ãƒ†ã‚¹ãƒˆãƒ¡ã‚½ãƒƒãƒ‰ã¸ã©ã®å¼•数セットを渡ã™ã‹ã‚’特定ã™ã‚‹ã‚¯ãƒ©ã‚¹ãƒ¬ãƒ™ãƒ«ã®å®šç¾©ã‚’調ã¹ã¾ã™ã€‚実行ã—ã¦ã¿ã¾ã—ょã†:: $ py.test -q collecting ... collected 3 items F.. ================================= FAILURES ================================= ________________________ TestClass.test_equals[1-2] ________________________ self = , a = 1, b = 2 def test_equals(self, a, b): > assert a == b E assert 1 == 2 test_parametrize.py:18: AssertionError 1 failed, 2 passed in 0.01 seconds .. Indirect parametrization with multiple resources -------------------------------------------------------------- 複数リソースã§ã®é–“接的ãªãƒ‘ラメーター渡㗠---------------------------------------- .. Here is a stripped down real-life example of using parametrized testing for testing serialization, invoking different python interpreters. 
We define a ``test_basic_objects`` function which is to be run with different sets of arguments for its three arguments: 別々㮠Python インタープリターã§å®Ÿè¡Œã—ã€ã‚·ãƒªã‚¢ãƒ©ã‚¤ã‚ºåŒ–を検証ã™ã‚‹ã®ã«ãƒ‘ラメーターテストを使ã†ã€å®Ÿéš›ã®ä¸–界ã§ã®ã‚µãƒ³ãƒ—ルを解説ã—ã¾ã™ã€‚次ã®3ã¤ã®å¼•数を全組ã¿åˆã‚ã›ã§å®Ÿè¡Œã™ã‚‹ ``test_basic_objects`` 関数を定義ã—ã¾ã™ã€‚ .. * ``python1``: first python interpreter, run to pickle-dump an object to a file * ``python2``: second interpreter, run to pickle-load an object from a file * ``obj``: object to be dumped/loaded * ``python1`` : 1番目㮠Python インタープリターã€ã‚ªãƒ–ジェクトをファイル㸠pickle-dump ã™ã‚‹ãŸã‚ã«å®Ÿè¡Œ * ``python2`` : 2番目㮠Python インタープリターã€ãƒ•ァイルã‹ã‚‰ã‚ªãƒ–ジェクトを pickle-load ã™ã‚‹ãŸã‚ã«å®Ÿè¡Œ * ``obj`` : ダンプã—ãŸã‚Šèª­ã¿è¾¼ã‚€ãŸã‚ã®ã‚ªãƒ–ジェクト .. literalinclude:: multipython.py .. Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize):: ã‚‚ã—å…¨ã¦ã® Python インタープリターãŒã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã•れã¦ã„ãªã„å ´åˆã€å®Ÿè¡Œã—ã¦ã‚‚スキップã•れã¾ã™ã€‚インストール済ã¿ã®å ´åˆã€å…¨ã¦ã®çµ„ã¿åˆã‚ã›ãŒå®Ÿè¡Œã•れã¾ã™ (5ã¤ã®ã‚¤ãƒ³ã‚¿ãƒ¼ãƒ—リター * 5ã¤ã®ã‚¤ãƒ³ã‚¿ãƒ¼ãƒ—リター * 3ã¤ã®ã‚·ãƒªã‚¢ãƒ©ã‚¤ã‚ºï¼ãƒ‡ã‚·ãƒªã‚¢ãƒ©ã‚¤ã‚ºã™ã‚‹ã‚ªãƒ–ジェクト):: . $ py.test -rs -q multipython.py collecting ... collected 75 items ............sss............sss............sss............ssssssssssssssssss ========================= short test summary info ========================== SKIP [27] /home/hpk/p/pytest/doc/example/multipython.py:36: 'python2.8' not found 48 passed, 27 skipped in 1.71 seconds pytest-2.5.1/doc/ja/example/reportingdemo.txt0000664000175000017500000004661012254002202020644 0ustar hpkhpk00000000000000 .. _`tbreportdemo`: py.test ã«ã‚ˆã‚‹ãƒ†ã‚¹ãƒˆå¤±æ•—時ã®ãƒ¬ãƒãƒ¼ãƒˆã®ãƒ‡ãƒ¢ ========================================== .. Demo of Python failure reports with py.test ================================================== .. 
Here is a nice run of several tens of failures and how py.test presents things (unfortunately not showing the nice colors here in the HTML that you get on the terminal - we are working on that): 次㫠py.test ãŒãƒ†ã‚¹ãƒˆå¤±æ•—時ã®ãƒ¬ãƒãƒ¼ãƒˆã‚’ã©ã†è¡¨ç¾ã™ã‚‹ã‹ã«ã¤ã„ã¦ã®ã€æ•°å個ã®å®Ÿè¡ŒçµæžœãŒã‚りã¾ã™ (残念ãªãŒã‚‰ã€ã“ã“ã§ã¯ã€ã‚¿ãƒ¼ãƒŸãƒŠãƒ«ã§å®Ÿè¡Œã—ãŸã¨ãã®ã‚ˆã†ãªè¦‹ã‚„ã™ã„カラー表示ã§ã¯ã‚りã¾ã›ã‚“): .. code-block:: python assertion $ py.test failure_demo.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 39 items failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ================================= FAILURES ================================= ____________________________ test_generative[0] ____________________________ param1 = 3, param2 = 6 def test_generative(param1, param2): > assert param1 * 2 < param2 E assert (3 * 2) < 6 failure_demo.py:15: AssertionError _________________________ TestFailing.test_simple __________________________ self = def test_simple(self): def f(): return 42 def g(): return 43 > assert f() == g() E assert 42 == 43 E + where 42 = () E + and 43 = () failure_demo.py:28: AssertionError ____________________ TestFailing.test_simple_multiline _____________________ self = def test_simple_multiline(self): otherfunc_multi( 42, > 6*9) failure_demo.py:33: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 42, b = 54 def otherfunc_multi(a,b): > assert (a == b) E assert 42 == 54 failure_demo.py:11: AssertionError ___________________________ TestFailing.test_not ___________________________ self = def test_not(self): def f(): return 42 > assert not f() E assert not 42 E + where 42 = () failure_demo.py:38: AssertionError _________________ TestSpecialisedExplanations.test_eq_text _________________ self = def test_eq_text(self): > assert 'spam' == 'eggs' E assert 'spam' == 'eggs' E - spam E + eggs failure_demo.py:42: AssertionError 
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________ self = def test_eq_similar_text(self): > assert 'foo 1 bar' == 'foo 2 bar' E assert 'foo 1 bar' == 'foo 2 bar' E - foo 1 bar E ? ^ E + foo 2 bar E ? ^ failure_demo.py:45: AssertionError ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ self = def test_eq_multiline_text(self): > assert 'foo\nspam\nbar' == 'foo\neggs\nbar' E assert 'foo\nspam\nbar' == 'foo\neggs\nbar' E foo E - spam E + eggs E bar failure_demo.py:48: AssertionError ______________ TestSpecialisedExplanations.test_eq_long_text _______________ self = def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 b = '1'*100 + 'b' + '2'*100 > assert a == b E assert '111111111111...2222222222222' == '1111111111111...2222222222222' E Skipping 90 identical leading characters in diff E Skipping 91 identical trailing characters in diff E - 1111111111a222222222 E ? ^ E + 1111111111b222222222 E ? ^ failure_demo.py:53: AssertionError _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ self = def test_eq_long_text_multiline(self): a = '1\n'*100 + 'a' + '2\n'*100 b = '1\n'*100 + 'b' + '2\n'*100 > assert a == b E assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n' E Skipping 190 identical leading characters in diff E Skipping 191 identical trailing characters in diff E 1 E 1 E 1 E 1 E 1 E - a2 E + b2 E 2 E 2 E 2 E 2 failure_demo.py:58: AssertionError _________________ TestSpecialisedExplanations.test_eq_list _________________ self = def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] E assert [0, 1, 2] == [0, 1, 3] E At index 2 diff: 2 != 3 failure_demo.py:61: AssertionError ______________ TestSpecialisedExplanations.test_eq_list_long _______________ self = def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 b = [0]*100 + [2] + [3]*100 > assert a == b E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...] 
E At index 100 diff: 1 != 2 failure_demo.py:66: AssertionError _________________ TestSpecialisedExplanations.test_eq_dict _________________ self = def test_eq_dict(self): > assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2} E assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2} E - {'a': 0, 'b': 1} E ? ^ E + {'a': 0, 'b': 2} E ? ^ failure_demo.py:69: AssertionError _________________ TestSpecialisedExplanations.test_eq_set __________________ self = def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) E assert set([0, 10, 11, 12]) == set([0, 20, 21]) E Extra items in the left set: E 10 E 11 E 12 E Extra items in the right set: E 20 E 21 failure_demo.py:72: AssertionError _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ self = def test_eq_longer_list(self): > assert [1,2] == [1,2,3] E assert [1, 2] == [1, 2, 3] E Right contains more items, first extra item: 3 failure_demo.py:75: AssertionError _________________ TestSpecialisedExplanations.test_in_list _________________ self = def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] E assert 1 in [0, 2, 3, 4, 5] failure_demo.py:78: AssertionError __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ self = def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' > assert 'foo' not in text E assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail' E 'foo' is contained here: E some multiline E text E which E includes foo E ? +++ E and a E tail failure_demo.py:82: AssertionError ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ self = def test_not_in_text_single(self): text = 'single foo line' > assert 'foo' not in text E assert 'foo' not in 'single foo line' E 'foo' is contained here: E single foo line E ? 
+++ failure_demo.py:86: AssertionError _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ self = def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 > assert 'foo' not in text E assert 'foo' not in 'head head head head hea...ail tail tail tail tail ' E 'foo' is contained here: E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? +++ failure_demo.py:90: AssertionError ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ self = def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 > assert 'f'*70 not in text E assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail ' E 'ffffffffffffffffff...fffffffffffffffffff' is contained here: E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ failure_demo.py:94: AssertionError ______________________________ test_attribute ______________________________ def test_attribute(): class Foo(object): b = 1 i = Foo() > assert i.b == 2 E assert 1 == 2 E + where 1 = .b failure_demo.py:101: AssertionError _________________________ test_attribute_instance __________________________ def test_attribute_instance(): class Foo(object): b = 1 > assert Foo().b == 2 E assert 1 == 2 E + where 1 = .b E + where = () failure_demo.py:107: AssertionError __________________________ test_attribute_failure __________________________ def test_attribute_failure(): class Foo(object): def _get_b(self): raise Exception('Failed to get attrib') b = property(_get_b) i = Foo() > assert i.b == 2 failure_demo.py:116: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = def _get_b(self): > raise Exception('Failed to get attrib') E Exception: Failed to get attrib failure_demo.py:113: Exception _________________________ test_attribute_multiple __________________________ def test_attribute_multiple(): class Foo(object): b = 1 class Bar(object): b = 2 > assert Foo().b == Bar().b E assert 1 == 2 E + where 1 = .b E + where = () E + and 2 = .b E + where = () failure_demo.py:124: AssertionError __________________________ TestRaises.test_raises __________________________ self = def test_raises(self): s = 'qwe' > raises(TypeError, "int(s)") failure_demo.py:133: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' <0-codegen /home/hpk/p/pytest/_pytest/python.py:978>:1: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ self = def test_raises_doesnt(self): > raises(IOError, "int('3')") E Failed: DID NOT RAISE failure_demo.py:136: Failed __________________________ TestRaises.test_raise ___________________________ self = def 
test_raise(self): > raise ValueError("demo error") E ValueError: demo error failure_demo.py:139: ValueError ________________________ TestRaises.test_tupleerror ________________________ self = def test_tupleerror(self): > a,b = [1] E ValueError: need more than 1 value to unpack failure_demo.py:142: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ self = def test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] print ("l is %r" % l) > a,b = l.pop() E TypeError: 'int' object is not iterable failure_demo.py:147: TypeError ----------------------------- Captured stdout ------------------------------ l is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ self = def test_some_error(self): > if namenotexi: E NameError: global name 'namenotexi' is not defined failure_demo.py:150: NameError ____________________ test_dynamic_compile_shows_nicely _____________________ def test_dynamic_compile_shows_nicely(): src = 'def foo():\n assert 1 == 0\n' name = 'abc-123' module = py.std.imp.new_module(name) code = py.code.compile(src, name, 'exec') py.builtin.exec_(code, module.__dict__) py.std.sys.modules[name] = module > module.foo() failure_demo.py:165: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def foo(): > assert 1 == 0 E assert 1 == 0 <2-codegen 'abc-123' /home/hpk/p/pytest/doc/example/assertion/failure_demo.py:162>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ self = def test_complex_error(self): def f(): return 44 def g(): return 43 > somefunc(f(), g()) failure_demo.py:175: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ x = 44, y = 43 def somefunc(x,y): > otherfunc(x,y) failure_demo.py:8: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 44, b = 43 def otherfunc(a,b): > assert a==b E assert 44 == 43 failure_demo.py:5: AssertionError 
___________________ TestMoreErrors.test_z1_unpack_error ____________________ self = def test_z1_unpack_error(self): l = [] > a,b = l E ValueError: need more than 0 values to unpack failure_demo.py:179: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ self = def test_z2_type_error(self): l = 3 > a,b = l E TypeError: 'int' object is not iterable failure_demo.py:183: TypeError ______________________ TestMoreErrors.test_startswith ______________________ self = def test_startswith(self): s = "123" g = "456" > assert s.startswith(g) E assert ('456') E + where = '123'.startswith failure_demo.py:188: AssertionError __________________ TestMoreErrors.test_startswith_nested ___________________ self = def test_startswith_nested(self): def f(): return "123" def g(): return "456" > assert f().startswith(g()) E assert ('456') E + where = '123'.startswith E + where '123' = () E + and '456' = () failure_demo.py:195: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ self = def test_global_func(self): > assert isinstance(globf(42), float) E assert isinstance(43, float) E + where 43 = globf(42) failure_demo.py:198: AssertionError _______________________ TestMoreErrors.test_instance _______________________ self = def test_instance(self): self.x = 6*7 > assert self.x != 42 E assert 42 != 42 E + where 42 = .x failure_demo.py:202: AssertionError _______________________ TestMoreErrors.test_compare ________________________ self = def test_compare(self): > assert globf(10) < 5 E assert 11 < 5 E + where 11 = globf(10) failure_demo.py:205: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ self = def test_try_finally(self): x = 1 try: > assert x == 0 E assert 1 == 0 failure_demo.py:210: AssertionError ======================== 39 failed in 0.17 seconds ========================= pytest-2.5.1/doc/ja/example/mysetup.txt0000664000175000017500000002004612254002202017467 0ustar 
hpkhpk00000000000000 .. highlightlang:: python .. _mysetup: Mysetup パターン: アプリケーションã«ç‰¹åŒ–ã—ãŸãƒ†ã‚¹ãƒˆãƒ•ィクスãƒãƒ£ ============================================================== .. Mysetup pattern: application specific test fixtures ========================================================== .. Here is a basic useful step-by-step example for managing and interacting with application specific test setup. The goal is to have one place where we have the glue and test support code for bootstrapping and configuring application objects and allow test modules and test functions to stay ignorant of involved details. アプリケーションã«ç‰¹åŒ–ã—ãŸãƒ†ã‚¹ãƒˆã®ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—を管ç†ã—ãŸã‚Šã€ç›¸äº’ã«ã‚„りå–りã™ã‚‹ã€åŸºæœ¬çš„且ã¤ä¾¿åˆ©ãªã‚µãƒ³ãƒ—ルを順を追ã£ã¦ç´¹ä»‹ã—ã¾ã™ã€‚ãã®ç›®çš„ã¨ã—ã¦ã¯ã€ã‚¢ãƒ—リケーションオブジェクトã®ä¸€é€£ã®é–‹å§‹å‡¦ç†ã‚„設定ã®ã‚°ãƒ«ãƒ¼ã‚³ãƒ¼ãƒ‰ã‚„テストコードを1ã¤ã®å ´æ‰€ã«é›†ã‚ã€å®Ÿè¡Œæ™‚ã«ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã¨ãƒ†ã‚¹ãƒˆé–¢æ•°ã‹ã‚‰ãã†ã„ã£ãŸå‡¦ç†ã®è©³ç´°ã‚’見ãˆãªãã™ã‚‹ã“ã¨ã§ã™ã€‚ .. Step 1: Implementing the test/app-specific ``mysetup`` pattern -------------------------------------------------------------- ステップ 1: アプリケーションã«ç‰¹åŒ–ã—㟠``mysetup`` パターンã®å®Ÿè£… ----------------------------------------------------------------- .. Let's write a simple test function using a ``mysetup`` funcarg:: ``mysetup`` ã¨ã„ã†é–¢æ•°ã®å¼•数を使ã£ã¦ã€ç°¡å˜ãªãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’書ã„ã¦ã¿ã¾ã—ょã†:: # test_sample.py ã®å†…容 def test_answer(mysetup): app = mysetup.myapp() answer = app.question() assert answer == 42 .. To run this test py.test needs to find and call a factory to obtain the required ``mysetup`` function argument. 
To make an according factory findable we write down a specifically named factory method in a :ref:`local plugin ` :: ã“ã®ãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹ãŸã‚ã« py.test ã¯ã€é–¢æ•°ã®å¼•æ•°ã«ä¸Žãˆã‚‰ã‚ŒãŸ ``mysetup`` を扱ã†ãƒ•ァクトリー関数を探ã—ã¦å‘¼ã³å‡ºã™å¿…è¦ãŒã‚りã¾ã™ã€‚ã“ã®ãƒ•ァクトリー関数を探ã—出ã›ã‚‹ã‚ˆã†ã« :ref:`local プラグイン ` ã«ç‰¹åˆ¥ãªåå‰ã‚’ã‚‚ã¤ãƒ•ァクトリーメソッドを書ãã¾ã™:: # conftest.py ã®å†…容 from myapp import MyApp def pytest_funcarg__mysetup(request): # "mysetup" ファクトリー関数 return MySetup() class MySetup: # ã“ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã¯ãƒ†ã‚¹ãƒˆé–¢æ•°ã‹ã‚‰è¦‹ãˆã‚‹ def myapp(self): return MyApp() .. To run the example we stub out a simple ``MyApp`` application object:: ã“ã®ã‚µãƒ³ãƒ—ルを実行ã™ã‚‹ãŸã‚ã« ``MyApp`` アプリケーションオブジェクトã®ç°¡å˜ãªã‚¹ã‚¿ãƒ–を作りã¾ã™:: # myapp.py ã®å†…容 class MyApp: def question(self): return 6 * 9 .. You can now run the test:: テストを実行ã—ã¾ã™:: $ py.test test_sample.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 1 items test_sample.py F ================================= FAILURES ================================= _______________________________ test_answer ________________________________ mysetup = def test_answer(mysetup): app = mysetup.myapp() answer = app.question() > assert answer == 42 E assert 54 == 42 test_sample.py:4: AssertionError ========================= 1 failed in 0.01 seconds ========================= .. This means that our ``mysetup`` object was successfully instantiated and ``mysetup.app()`` returned an initialized ``MyApp`` instance. We can ask it about the question and if you are confused as to what the concrete question or answers actually mean, please see here_. ``mysetup`` ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆãŒæ­£å¸¸ã«ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹åŒ–ã•れã¦ã€ ``mysetup.app()`` ãŒåˆæœŸåŒ–ã•れ㟠``MyApp`` インスタンスを返ã—ã¾ã—ãŸã€‚ã‚ãªãŸãŒå…·ä½“çš„ã«ä½•ã‚’èžã‘ã°è‰¯ã„ã®ã‹ã€ã‚‚ã—ãã¯å®Ÿéš›ã«ä½•ãŒèµ·ã“ã£ãŸã‹ã«æ··ä¹±ã—ã¦ã„ã‚‹ãªã‚‰ã€ãã®è³ªå•ã«é–¢ã—ã¦å°‹ã­ã‚‰ã‚Œã¾ã™ã€‚ `ã“ã“`_ ã‚’ã”覧ãã ã•ã„。 .. 
_here: http://uncyclopedia.wikia.com/wiki/The_Hitchhiker's_Guide_to_the_Galaxy .. _ã“ã“: http://uncyclopedia.wikia.com/wiki/The_Hitchhiker's_Guide_to_the_Galaxy .. _`tut-cmdlineoption`: ステップ 2: コマンドラインオプションã¨ãƒ†ã‚¹ãƒˆã®ã‚¹ã‚­ãƒƒãƒ—ã‚’ç¢ºèª ------------------------------------------------------------ .. Step 2: Checking a command line option and skipping tests ----------------------------------------------------------- .. To add a command line option we update the ``conftest.py`` of the previous example to add a command line option and to offer a new mysetup method:: コマンドラインオプションを追加ã™ã‚‹ã«ã¯ã€å‰è¿°ã—ãŸã‚µãƒ³ãƒ—ル㮠``conftest.py`` ã«ã€ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションを追加ã—ã¦æ–°ãŸãª mysetup メソッドをæä¾›ã™ã‚‹ã‚ˆã†ã«å¤‰æ›´ã—ã¾ã™:: # ./conftest.py ã®å†…容 import pytest from myapp import MyApp def pytest_funcarg__mysetup(request): # "mysetup" ファクトリー関数 return MySetup(request) def pytest_addoption(parser): parser.addoption("--ssh", action="store", default=None, help="specify ssh host to run tests with") class MySetup: def __init__(self, request): self.config = request.config def myapp(self): return MyApp() def getsshconnection(self): host = self.config.option.ssh if host is None: pytest.skip("specify ssh host with --ssh") return execnet.SshGateway(host) .. Now any test function can use the ``mysetup.getsshconnection()`` method like this:: 次ã®ã‚ˆã†ã«ãƒ†ã‚¹ãƒˆé–¢æ•°ã‹ã‚‰ ``mysetup.getsshconnection()`` メソッドを使ãˆã¾ã™:: # test_ssh.py ã®å†…容 class TestClass: def test_function(self, mysetup): conn = mysetup.getsshconnection() # conn を使ã£ã¦ãƒ†ã‚¹ãƒˆã™ã‚‹ .. Running it yields:: 実行ã™ã‚‹ã¨æ¬¡ã®ã‚ˆã†ãªãƒ¬ãƒãƒ¼ãƒˆãŒè¡¨ç¤ºã•れã¾ã™:: $ py.test test_ssh.py -rs =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... 
collected 1 items test_ssh.py s ========================= short test summary info ========================== SKIP [1] /tmp/doc-exec-220/conftest.py:22: specify ssh host with --ssh ======================== 1 skipped in 0.01 seconds ========================= .. If you specify a command line option like ``py.test --ssh=python.org`` the test will execute as expected. ``py.test --ssh=python.org`` ã®ã‚ˆã†ã«ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションを指定ã™ã‚‹ã¨ã€æœŸå¾…ã—ãŸé€šã‚Šã«ãƒ†ã‚¹ãƒˆãŒå®Ÿè¡Œã•れã¾ã™ã€‚ .. Note that neither the ``TestClass`` nor the ``test_function`` need to know anything about how to setup the test state. It is handled separately in your "test setup glue" code in the ``conftest.py`` file. It is easy to extend the ``mysetup`` object for further needs in the test code - and for use by any other test functions in the files and directories below the ``conftest.py`` file. ``TestClass`` ã‚‚ ``test_function`` ã®ã©ã¡ã‚‰ã¨ã‚‚ã€ãƒ†ã‚¹ãƒˆã®çŠ¶æ…‹ã‚’ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—ã™ã‚‹æ–¹æ³•ã«ã¤ã„ã¦ä½•も知る必è¦ãŒãªã„ã“ã¨ã«æ³¨ç›®ã—ã¦ãã ã•ã„。 ``conftest.py`` ファイル㮠"テストをセットアップã™ã‚‹ã‚°ãƒ«ãƒ¼" コードã¯åˆ¥ã€…ã«å‡¦ç†ã•れã¾ã™ã€‚テストコード内ã§å¿…è¦ã«å¿œã˜ã¦ ``mysetup`` オブジェクトを拡張ã™ã‚‹ã®ã¯ç°¡å˜ã§ã™ã€‚ ``conftest.py`` ファイルã®é…下ã«ã‚るファイルやディレクトリã®ã€ä»»æ„ã®ãƒ†ã‚¹ãƒˆé–¢æ•°ã«ã‚ˆã£ã¦ä½¿ã‚れã¾ã™ã€‚ pytest-2.5.1/doc/ja/example/xfail_demo.py0000664000175000017500000000056212254002202017702 0ustar hpkhpk00000000000000import pytest xfail = pytest.mark.xfail @xfail def test_hello(): assert 0 @xfail(run=False) def test_hello2(): assert 0 @xfail("hasattr(os, 'sep')") def test_hello3(): assert 0 @xfail(reason="bug 110") def test_hello4(): assert 0 @xfail('pytest.__version__[0] != "17"') def test_hello5(): assert 0 def test_hello6(): pytest.xfail("reason") pytest-2.5.1/doc/ja/example/layout1/0000775000175000017500000000000012254002202016614 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/example/layout1/setup.cfg0000664000175000017500000000015212254002202020433 0ustar hpkhpk00000000000000[pytest] testfilepatterns = 
${topdir}/tests/unit/test_${basename} ${topdir}/tests/functional/*.py pytest-2.5.1/doc/ja/example/conftest.py0000664000175000017500000000003712254002202017415 0ustar hpkhpk00000000000000collect_ignore = ["nonpython"] pytest-2.5.1/doc/ja/example/py2py3/0000775000175000017500000000000012254002202016364 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/example/py2py3/test_py2.py0000664000175000017500000000015112254002202020504 0ustar hpkhpk00000000000000 def test_exception_syntax(): try: 0/0 except ZeroDivisionError, e: assert 0, e pytest-2.5.1/doc/ja/example/py2py3/conftest.py0000664000175000017500000000050412254002202020562 0ustar hpkhpk00000000000000import sys import pytest py3 = sys.version_info[0] >= 3 class DummyCollector(pytest.collect.File): def collect(self): return [] def pytest_pycollect_makemodule(path, parent): bn = path.basename if "py3" in bn and not py3 or ("py2" in bn and py3): return DummyCollector(path, parent=parent) pytest-2.5.1/doc/ja/example/py2py3/test_py3.py0000664000175000017500000000015312254002202020507 0ustar hpkhpk00000000000000 def test_exception_syntax(): try: 0/0 except ZeroDivisionError as e: assert 0, e pytest-2.5.1/doc/ja/example/markers.txt0000664000175000017500000003642312254002202017433 0ustar hpkhpk00000000000000 .. _`mark examples`: カスタムマーカーを使ㆠ====================== .. Working with custom markers ================================================= .. Here are some example using the :ref:`mark` mechanism. ã“ã“ã§ã¯ :ref:`mark` ã®ä»•組ã¿ã‚’使ã£ãŸã‚µãƒ³ãƒ—ルを紹介ã—ã¾ã™ã€‚ .. Marking test functions and selecting them for a run ---------------------------------------------------- テスト関数をマークã—ã¦å®Ÿè¡Œæ™‚ã«é¸æŠž ---------------------------------- .. You can "mark" a test function with custom metadata like this:: 次ã®ã‚ˆã†ã«ã‚«ã‚¹ã‚¿ãƒ ãƒ¡ã‚¿ãƒ‡ãƒ¼ã‚¿ã§ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’ "マーク" ã§ãã¾ã™:: # test_server.py ã®å†…容 import pytest @pytest.mark.webtest def test_send_http(): pass # アプリ㮠webtest テストを実行 def test_something_quick(): pass .. versionadded:: 2.2 .. 
You can then restrict a test run to only run tests marked with ``webtest``:: ``webtest`` ã§ãƒžãƒ¼ã‚¯ã•れãŸãƒ†ã‚¹ãƒˆã®ã¿ã‚’実行ã™ã‚‹ã‚ˆã†ã«åˆ¶é™ã§ãã¾ã™:: $ py.test -v -m webtest =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 -- /home/hpk/venv/0/bin/python collecting ... collected 2 items test_server.py:3: test_send_http PASSED =================== 1 tests deselected by "-m 'webtest'" =================== ================== 1 passed, 1 deselected in 0.00 seconds ================== .. Or the inverse, running all tests except the webtest ones:: ã‚‚ã—ãã¯é€†ã« webtest を除ãå…¨ã¦ã®ãƒ†ã‚¹ãƒˆã‚’実行ã—ã¾ã™:: $ py.test -v -m "not webtest" =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 -- /home/hpk/venv/0/bin/python collecting ... collected 2 items test_server.py:6: test_something_quick PASSED ================= 1 tests deselected by "-m 'not webtest'" ================= ================== 1 passed, 1 deselected in 0.00 seconds ================== .. Registering markers ------------------------------------- マーカーã®ç™»éŒ² -------------- .. versionadded:: 2.2 .. Registering markers for your test suite is simple:: .. ini-syntax for custom markers: テストスイートã«ãƒžãƒ¼ã‚«ãƒ¼ã‚’登録ã™ã‚‹ã®ã¯ç™»éŒ²ã§ã™:: # pytest.ini ã®å†…容 [pytest] markers = webtest: mark a test as a webtest. .. You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` markers:: テストスイートã«å­˜åœ¨ã™ã‚‹ãƒžãƒ¼ã‚«ãƒ¼ãŒèª¿ã¹ã¾ã™ã€‚次ã®ä¸€è¦§ã§ã¯ã€å…ˆã»ã©å®šç¾©ã—㟠``webtest`` マーカーãŒã‚りã¾ã™:: $ py.test --markers @pytest.mark.webtest: mark a test as a webtest. @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. 
@pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied. @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in multiple different argument value sets. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2. @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. .. For an example on how to add and work with markers from a plugin, see :ref:`adding a custom marker from a plugin`. プラグインã‹ã‚‰ãƒžãƒ¼ã‚«ãƒ¼ã‚’追加ã—ã¦å‡¦ç†ã™ã‚‹ã‚µãƒ³ãƒ—ルã«ã¤ã„ã¦ã¯ :ref:`adding a custom marker from a plugin` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. It is recommended to explicitely register markers so that: * there is one place in your test suite defining your markers * asking for existing markers via ``py.test --markers`` gives good output * typos in function markers are treated as an error if you use the ``--strict`` option. Later versions of py.test are probably going to treat non-registered markers as an error. .. note:: 次ã®ã‚ˆã†ã«æ˜Žç¤ºçš„ã«ãƒžãƒ¼ã‚«ãƒ¼ã‚’登録ã™ã‚‹ã“ã¨ã‚’推奨ã—ã¾ã™: * テストスイートã®ä¸€ç®‡æ‰€ã§ãƒžãƒ¼ã‚«ãƒ¼ã‚’定義ã™ã‚‹ * ``py.test --markers`` ã§æ—¢å­˜ã®ãƒžãƒ¼ã‚«ãƒ¼ã«é–¢ã™ã‚‹åˆ†ã‹ã‚Šã‚„ã™ã„説明を表示ã™ã‚‹ * ``--strict`` オプションを使ã†ã¨ã€é–¢æ•°ãƒžãƒ¼ã‚«ãƒ¼å†…ã®èª¤å­—をエラーã«ã—ã¾ã™ã€æœ€è¿‘ã® py.test ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã§ã¯ã€æœªç™»éŒ²ãƒžãƒ¼ã‚«ãƒ¼ã‚’エラーã¨ã—ã¦æ‰±ã†ã‚ˆã†ã«ã—ã¦ã„ã¾ã™ .. _`scoped-marking`: クラスã¾ãŸã¯ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«å…¨ä½“をマーキング -------------------------------------- .. 
Marking whole classes or modules ---------------------------------------------------- .. If you are programming with Python 2.6 or later you may use ``pytest.mark`` decorators with classes to apply markers to all of its test methods:: Python 2.6 ã‹ã€ãれ以上ã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã§ã‚³ãƒ¼ãƒ‡ã‚£ãƒ³ã‚°ã—ã¦ã„ã‚‹ãªã‚‰ã€ã‚¯ãƒ©ã‚¹ã®ãƒ†ã‚¹ãƒˆãƒ¡ã‚½ãƒƒãƒ‰å…¨ã¦ã«ãƒžãƒ¼ã‚«ãƒ¼ã‚’é©ç”¨ã™ã‚‹ãŸã‚ã« ``pytest.mark`` をクラスデコレーターã¨ã—ã¦ä½¿ãˆã¾ã™:: # test_mark_classlevel.py ã®å†…容 import pytest @pytest.mark.webtest class TestClass: def test_startup(self): pass def test_startup_and_more(self): pass .. This is equivalent to directly applying the decorator to the two test functions. ã“れã¯2ã¤ã®ãƒ†ã‚¹ãƒˆé–¢æ•°ã«ç›´æŽ¥ãƒ‡ã‚³ãƒ¬ãƒ¼ã‚¿ãƒ¼ã‚’é©ç”¨ã™ã‚‹ã®ã¨åŒã˜ã§ã™ã€‚ .. To remain backward-compatible with Python 2.4 you can also set a ``pytestmark`` attribute on a TestClass like this:: Pythn 2.4 ã¨ã®å¾Œæ–¹äº’æ›æ€§ã‚’ç¶­æŒã™ã‚‹ã«ã¯ã€æ¬¡ã®ã‚ˆã†ã« TestClass ã« ``pytestmark`` 属性も設定ã§ãã¾ã™:: import pytest class TestClass: pytestmark = pytest.mark.webtest .. or if you need to use multiple markers you can use a list:: ã‚‚ã—ãã¯ã€è¤‡æ•°ã®ãƒžãƒ¼ã‚«ãƒ¼ã‚’使ã†å¿…è¦ãŒã‚ã‚‹å ´åˆã¯ãƒªã‚¹ãƒˆã‚‚使ãˆã¾ã™:: import pytest class TestClass: pytestmark = [pytest.mark.webtest, pytest.mark.slowtest] .. You can also set a module level marker:: モジュールレベルã®ãƒžãƒ¼ã‚«ãƒ¼ã‚‚設定ã§ãã¾ã™:: import pytest pytestmark = pytest.mark.webtest .. in which case it will be applied to all functions and methods defined in the module. ã“ã®å ´åˆã€ãã®ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«å†…ã§å®šç¾©ã•れã¦ã„ã‚‹å…¨ã¦ã®é–¢æ•°ã¨ãƒ¡ã‚½ãƒƒãƒ‰ã«é©ç”¨ã•れã¾ã™ã€‚ .. Using ``-k TEXT`` to select tests ---------------------------------------------------- ``-k TEXT`` を使ã£ãŸãƒ†ã‚¹ãƒˆã®é¸æŠž -------------------------------- .. 
You can use the ``-k`` command line option to only run tests with names matching the given argument:: 指定ã—ãŸå¼•æ•°ã«ä¸€è‡´ã™ã‚‹åå‰ã®ãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹ã«ã¯ ``-k`` コマンドラインオプションを使ã„ã¾ã™:: $ py.test -k send_http # å‰ç¯€ã§å®šç¾©ã—ãŸã‚µãƒ³ãƒ—ルを実行 =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 4 items test_server.py . =================== 3 tests deselected by '-ksend_http' ==================== ================== 1 passed, 3 deselected in 0.01 seconds ================== .. And you can also run all tests except the ones that match the keyword:: ã¾ãŸã€ãã®ã‚­ãƒ¼ãƒ¯ãƒ¼ãƒ‰ã«ä¸€è‡´ã™ã‚‹ã‚‚ã®ã‚’除ãå…¨ã¦ã®ãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹ã“ã¨ã‚‚ã§ãã¾ã™:: $ py.test -k-send_http =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 4 items test_mark_classlevel.py .. test_server.py . =================== 1 tests deselected by '-k-send_http' =================== ================== 3 passed, 1 deselected in 0.01 seconds ================== .. Or to only select the class:: ã‚‚ã—ãã¯ã€ã‚¯ãƒ©ã‚¹ã®ã¿ã‚’é¸æŠžã™ã‚‹ã«ã¯æ¬¡ã®ã‚ˆã†ã«ã—ã¾ã™:: $ py.test -kTestClass =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 4 items test_mark_classlevel.py .. =================== 2 tests deselected by '-kTestClass' ==================== ================== 2 passed, 2 deselected in 0.01 seconds ================== .. _`adding a custom marker from a plugin`: カスタムマーカーã¨ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションã«ã‚ˆã‚‹ãƒ†ã‚¹ãƒˆã®å®Ÿè¡Œåˆ¶å¾¡ ---------------------------------------------------------------- .. Custom marker and command line option to control test runs ---------------------------------------------------------- .. regendoc:wipe .. Plugins can provide custom markers and implement specific behaviour based on it. 
This is a self-contained example which adds a command line option and a parametrized test function marker to run tests specifies via named environments:: プラグインã¯ã€ã‚«ã‚¹ã‚¿ãƒ ãƒžãƒ¼ã‚«ãƒ¼ã‚’æä¾›ã—ã¦ã€ãã®ãƒžãƒ¼ã‚«ãƒ¼ã«åŸºã¥ãç‰¹åˆ¥ãªæŒ¯ã‚‹èˆžã„を実装ã—ã¾ã™ã€‚ã“れã¯ã€ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションã¨ã€åå‰ä»˜ãã®ç’°å¢ƒã®å€¤ã«ç‰¹åŒ–ã—ãŸãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹ãŸã‚ã®ãƒ‘ラメーター化ã•れãŸãƒ†ã‚¹ãƒˆé–¢æ•°ãƒžãƒ¼ã‚«ãƒ¼ã‚’追加ã™ã‚‹è‡ªå·±å®Œçµåž‹ã®ã‚µãƒ³ãƒ—ルã§ã™:: # conftest.py ã®å†…容 import pytest def pytest_addoption(parser): parser.addoption("-E", dest="env", action="store", metavar="NAME", help="only run tests matching the environment NAME.") def pytest_configure(config): # 追加ã®ãƒžãƒ¼ã‚«ãƒ¼ã‚’登録 config.addinivalue_line("markers", "env(name): mark test to run only on named environment") def pytest_runtest_setup(item): if not isinstance(item, item.Function): return if hasattr(item.obj, 'env'): envmarker = getattr(item.obj, 'env') envname = envmarker.args[0] if envname != item.config.option.env: pytest.skip("test requires env %r" % envname) .. A test file using this local plugin:: ã“ã® local プラグインを使ã†ãƒ†ã‚¹ãƒˆãƒ•ァイルã§ã™:: # test_someenv.py ã®å†…容 import pytest @pytest.mark.env("stage1") def test_basic_db_operation(): pass .. and an example invocations specifying a different environment than what the test needs:: ãã®ãƒ†ã‚¹ãƒˆãŒå¿…è¦ã¨ã™ã‚‹ã‚‚ã®ã§ã¯ãªã„別ã®ç’°å¢ƒã‚’指定ã—ã¦å®Ÿè¡Œã™ã‚‹ä¾‹ã§ã™:: $ py.test -E stage2 =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 1 items test_someenv.py s ======================== 1 skipped in 0.00 seconds ========================= .. and here is one that specifies exactly the environment needed:: ä»Šåº¦ã¯æ­£ã—ãå¿…è¦ã¨ã™ã‚‹ç’°å¢ƒã‚’指定ã—ã¦å®Ÿè¡Œã—ã¾ã™:: $ py.test -E stage1 =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 1 items test_someenv.py . 
========================= 1 passed in 0.00 seconds ========================= .. The ``--markers`` option always gives you a list of available markers:: ``--markers`` オプションã¯åˆ©ç”¨ã§ãるマーカーã®ä¸€è¦§ã‚’表示ã—ã¾ã™:: $ py.test --markers @pytest.mark.env(name): mark test to run only on named environment @pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. @pytest.mark.xfail(*conditions, reason=None, run=True): mark the the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied. @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in multiple different argument value sets. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2. @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. .. Reading markers which were set from multiple places ---------------------------------------------------- 複数ã®å ´æ‰€ã‹ã‚‰è¨­å®šã•れãŸãƒžãƒ¼ã‚«ãƒ¼ã‚’読ã¿è¾¼ã‚€ ------------------------------------------ .. versionadded: 2.2.2 .. If you are heavily using markers in your test suite you may encounter the case where a marker is applied several times to a test function. From plugin code you can read over all such settings. 
Example:: テストスイート内ã§ãƒžãƒ¼ã‚«ãƒ¼ã‚’ãŸãã•ん使ã†ã¨ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã«å¯¾ã—ã¦æ•°å›žãƒžãƒ¼ã‚«ãƒ¼ãŒé©ç”¨ã•れる場åˆãŒã‚りã¾ã™ã€‚プラグインコードã‹ã‚‰ã€ãã†ã„ã£ãŸå…¨ã¦ã®è¨­å®šã‚’読ã¿è¾¼ã‚ã¾ã™ã€‚サンプルを紹介ã—ã¾ã™:: # test_mark_three_times.py ã®å†…容 import pytest pytestmark = pytest.mark.glob("module", x=1) @pytest.mark.glob("class", x=2) class TestClass: @pytest.mark.glob("function", x=3) def test_something(self): pass .. Here we have the marker "glob" applied three times to the same test function. From a conftest file we can read it like this:: ã“ã“ã§ã¯ã€åŒã˜ãƒ†ã‚¹ãƒˆé–¢æ•°ã«å¯¾ã—ã¦3回é©ç”¨ã•れる "glob" マーカーãŒã‚りã¾ã™ã€‚conftest ファイルã‹ã‚‰æ¬¡ã®ã‚ˆã†ã«ã—ã¦ãれを調ã¹ã¾ã™:: # conftest.py ã®å†…容 def pytest_runtest_setup(item): g = getattr(item.obj, 'glob', None) if g is not None: for info in g: print ("glob args=%s kwargs=%s" %(info.args, info.kwargs)) .. Let's run this without capturing output and see what we get:: 標準出力をå–å¾—ã›ãšã«ã“ã®ãƒ†ã‚¹ãƒˆã‚’実行ã—ã¦ã€ä½•ãŒè¡¨ç¤ºã•れるã‹ã‚’見ã¦ã¿ã¾ã—ょã†:: $ py.test -q -s collecting ... collected 2 items .. 2 passed in 0.01 seconds glob args=('function',) kwargs={'x': 3} glob args=('class',) kwargs={'x': 2} glob args=('module',) kwargs={'x': 1} pytest-2.5.1/doc/ja/example/nonpython/0000775000175000017500000000000012254002202017252 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/example/nonpython/__init__.py0000664000175000017500000000000012254002202021351 0ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/example/nonpython/conftest.py0000664000175000017500000000250412254002202021452 0ustar hpkhpk00000000000000# content of conftest.py import pytest def pytest_collect_file(path, parent): if path.ext == ".yml" and path.basename.startswith("test"): return YamlFile(path, parent) class YamlFile(pytest.File): def collect(self): import yaml # we need a yaml parser, e.g. 
PyYAML raw = yaml.load(self.fspath.open()) for name, spec in raw.items(): yield YamlItem(name, self, spec) class YamlItem(pytest.Item): def __init__(self, name, parent, spec): super(YamlItem, self).__init__(name, parent) self.spec = spec def runtest(self): for name, value in self.spec.items(): # some custom test execution (dumb example follows) if name != value: raise YamlException(self, name, value) def repr_failure(self, excinfo): """ called when self.runtest() raises an exception. """ if isinstance(excinfo.value, YamlException): return "\n".join([ "usecase execution failed", " spec failed: %r: %r" % excinfo.value.args[1:3], " no further details known at this point." ]) def reportinfo(self): return self.fspath, 0, "usecase: %s" % self.name class YamlException(Exception): """ custom exception for error reporting. """ pytest-2.5.1/doc/ja/example/nonpython/test_simple.yml0000664000175000017500000000011612254002202022323 0ustar hpkhpk00000000000000# test_simple.yml ok: sub1: sub1 hello: world: world some: other pytest-2.5.1/doc/ja/example/multipython.py0000664000175000017500000000416412254002202020171 0ustar hpkhpk00000000000000""" module containing a parametrized tests testing cross-python serialization via the pickle module. """ import py, pytest pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8'] def pytest_generate_tests(metafunc): # we parametrize all "python1" and "python2" arguments to iterate # over the python interpreters of our list above - the actual # setup and lookup of interpreters in the python1/python2 factories # respectively. 
for arg in metafunc.fixturenames: if arg in ("python1", "python2"): metafunc.parametrize(arg, pythonlist, indirect=True) @pytest.mark.parametrize("obj", [42, {}, {1:3},]) def test_basic_objects(python1, python2, obj): python1.dumps(obj) python2.load_and_is_true("obj == %s" % obj) def pytest_funcarg__python1(request): tmpdir = request.getfuncargvalue("tmpdir") picklefile = tmpdir.join("data.pickle") return Python(request.param, picklefile) def pytest_funcarg__python2(request): python1 = request.getfuncargvalue("python1") return Python(request.param, python1.picklefile) class Python: def __init__(self, version, picklefile): self.pythonpath = py.path.local.sysfind(version) if not self.pythonpath: py.test.skip("%r not found" %(version,)) self.picklefile = picklefile def dumps(self, obj): dumpfile = self.picklefile.dirpath("dump.py") dumpfile.write(py.code.Source(""" import pickle f = open(%r, 'wb') s = pickle.dump(%r, f) f.close() """ % (str(self.picklefile), obj))) py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile)) def load_and_is_true(self, expression): loadfile = self.picklefile.dirpath("load.py") loadfile.write(py.code.Source(""" import pickle f = open(%r, 'rb') obj = pickle.load(f) f.close() res = eval(%r) if not res: raise SystemExit(1) """ % (str(self.picklefile), expression))) print (loadfile) py.process.cmdexec("%s %s" %(self.pythonpath, loadfile)) pytest-2.5.1/doc/ja/example/simple.txt0000664000175000017500000003650312254002202017257 0ustar hpkhpk00000000000000 .. highlightlang:: python 基本的ãªãƒ‘ターンã¨ä¾‹ ==================== .. Basic patterns and examples ========================================================== .. Pass different values to a test function, depending on command line options ---------------------------------------------------------------------------- コマンドラインオプションã§ãƒ†ã‚¹ãƒˆé–¢æ•°ã«é•ã†å€¤ã‚’渡㙠-------------------------------------------------- .. regendoc:wipe .. Suppose we want to write a test that depends on a command line option. 
Here is a basic pattern how to achieve this:: コマンドラインオプションã§åˆ¶å¾¡ã™ã‚‹ãƒ†ã‚¹ãƒˆã‚’書ããŸã„ã¨ä»®å®šã—ã¾ã™ã€‚ã“れを実ç¾ã™ã‚‹åŸºæœ¬çš„ãªæ–¹æ³•ã¯æ¬¡ã®é€šã‚Šã§ã™:: # test_sample.py ã®å†…容 def test_answer(cmdopt): if cmdopt == "type1": print ("first") elif cmdopt == "type2": print ("second") assert 0 # 何ãŒè¡¨ç¤ºã•れるã‹ã‚’見るãŸã‚ .. For this to work we need to add a command line option and provide the ``cmdopt`` through a :ref:`function argument ` factory:: ã“ã®ãŸã‚ã«ã¯ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションを追加ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚ :ref:`関数ã®å¼•æ•° ` ファクトリーを使ã£ã¦ ``cmdopt`` ã‚’æä¾›ã—ã¾ã™:: # conftest.py ã®å†…容 def pytest_addoption(parser): parser.addoption("--cmdopt", action="store", default="type1", help="my option: type1 or type2") def pytest_funcarg__cmdopt(request): return request.config.option.cmdopt .. Let's run this without supplying our new command line option:: å…ˆã»ã©ä½œæˆã—ãŸã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションを指定ã›ãšã«å®Ÿè¡Œã—ã¦ã¿ã¾ã—ょã†:: $ py.test -q test_sample.py collecting ... collected 1 items F ================================= FAILURES ================================= _______________________________ test_answer ________________________________ cmdopt = 'type1' def test_answer(cmdopt): if cmdopt == "type1": print ("first") elif cmdopt == "type2": print ("second") > assert 0 # 何ãŒè¡¨ç¤ºã•れるã‹ã‚’見るãŸã‚ E assert 0 test_sample.py:6: AssertionError ----------------------------- Captured stdout ------------------------------ first 1 failed in 0.01 seconds .. And now with supplying a command line option:: 次ã¯ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションを指定ã—ã¦å®Ÿè¡Œã—ã¾ã™:: $ py.test -q --cmdopt=type2 collecting ... 
collected 1 items F ================================= FAILURES ================================= _______________________________ test_answer ________________________________ cmdopt = 'type2' def test_answer(cmdopt): if cmdopt == "type1": print ("first") elif cmdopt == "type2": print ("second") > assert 0 # 何ãŒè¡¨ç¤ºã•れるã‹ã‚’見るãŸã‚ E assert 0 test_sample.py:6: AssertionError ----------------------------- Captured stdout ------------------------------ second 1 failed in 0.01 seconds .. Ok, this completes the basic pattern. However, one often rather wants to process command line options outside of the test and rather pass in different or more complex objects. See the next example or refer to :ref:`mysetup` for more information on real-life examples. ã¯ã„。基本的ãªä½¿ã„æ–¹ãŒåˆ†ã‹ã‚Šã¾ã—ãŸã€‚ã“れ以外ã«ã‚‚ã€ãƒ†ã‚¹ãƒˆã®å¤–部ã§ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションを処ç†ã—ã¦ã€åˆ¥ã‚ªãƒ–ジェクトや複雑ãªã‚ªãƒ–ジェクトを渡ã—ãŸã„ã“ã¨ã‚‚よãã‚りã¾ã™ã€‚次ã®ä¾‹ã€ã‚‚ã—ãã¯ç¾å®Ÿã®ä¸–界ã§ã®ä¾‹ã¯ :ref:`mysetup` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. Dynamically adding command line options -------------------------------------------------------------- コマンドラインオプションを動的ã«è¿½åŠ  ------------------------------------ .. regendoc:wipe .. Through :confval:`addopts` you can statically add command line options for your project. You can also dynamically modify the command line arguments before they get processed:: :confval:`addopts` を使ã£ã¦ã€ãƒ—ロジェクトã«ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションをé™çš„ã«è¿½åŠ ã§ãã¾ã™ã€‚é™çš„ã«è¿½åŠ ã—ãŸã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションãŒå‡¦ç†ã•れるå‰ã«ã€ãã®ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションを動的ã«å¤‰æ›´ã™ã‚‹ã“ã¨ã‚‚ã§ãã¾ã™:: # conftest.py ã®å†…容 import sys def pytest_cmdline_preparse(args): if 'xdist' in sys.modules: # pytest-xdist プラグイン import multiprocessing num = max(multiprocessing.cpu_count() / 2, 1) args[:] = ["-n", str(num)] + args .. If you have the :ref:`xdist plugin ` installed you will now always perform test runs using a number of subprocesses close to your CPU. 
Running in an empty directory with the above conftest.py:: :ref:`xdist プラグイン ` をインストール済ã¿ãªã‚‰ã€æ¯Žå›ž CPU æ•°ã«è¿‘ã„サブプロセスを使ã£ã¦ãƒ†ã‚¹ãƒˆã‚’実行ã§ãã¾ã™ã€‚空ã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã§ä¸Šè¨˜ã® conftest.py を実行ã—ã¾ã™:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 gw0 I / gw1 I / gw2 I / gw3 I gw0 [0] / gw1 [0] / gw2 [0] / gw3 [0] scheduling tests via LoadScheduling ============================= in 0.52 seconds ============================= .. _`excontrolskip`: コマンドラインオプションã§ãƒ†ã‚¹ãƒˆã®ã‚¹ã‚­ãƒƒãƒ—を制御 ------------------------------------------------ .. Control skipping of tests according to command line option -------------------------------------------------------------- .. regendoc:wipe .. Here is a ``conftest.py`` file adding a ``--runslow`` command line option to control skipping of ``slow`` marked tests:: ``slow`` ã¨ãƒžãƒ¼ã‚¯ã•れãŸãƒ†ã‚¹ãƒˆã®ã‚¹ã‚­ãƒƒãƒ—を制御ã™ã‚‹ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ション ``--runslow`` を追加ã™ã‚‹ ``conftest.py`` ãŒã‚りã¾ã™:: # conftest.py ã®å†…容 import pytest def pytest_addoption(parser): parser.addoption("--runslow", action="store_true", help="run slow tests") def pytest_runtest_setup(item): if 'slow' in item.keywords and not item.config.getvalue("runslow"): pytest.skip("need --runslow option to run") .. We can now write a test module like this:: ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã¯æ¬¡ã®ã‚ˆã†ã«æ›¸ãã¾ã™:: # test_module.py ã®å†…容 import pytest slow = pytest.mark.slow def test_func_fast(): pass @slow def test_func_slow(): pass .. and when running it will see a skipped "slow" test:: 実行ã™ã‚‹ã¨ã€"slow" テストãŒã‚¹ã‚­ãƒƒãƒ—ã•れã¾ã™:: $ py.test -rs # "-rs" 㯠's' ã®è©³ç´°ã‚’レãƒãƒ¼ãƒˆã—ã¾ã™ =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... 
collected 2 items test_module.py .s ========================= short test summary info ========================== SKIP [1] /tmp/doc-exec-225/conftest.py:9: need --runslow option to run =================== 1 passed, 1 skipped in 0.01 seconds ==================== .. Or run it including the ``slow`` marked test:: ã‚‚ã—ã㯠``slow`` ã¨ãƒžãƒ¼ã‚¯ã•れãŸãƒ†ã‚¹ãƒˆã‚’実行ã—ã¾ã™:: $ py.test --runslow =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 2 items test_module.py .. ========================= 2 passed in 0.01 seconds ========================= .. Writing well integrated assertion helpers -------------------------------------------------- çµ±åˆçš„ãªã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ãƒ˜ãƒ«ãƒ‘ーã®ä½œæˆ ---------------------------------- .. regendoc:wipe .. If you have a test helper function called from a test you can use the ``pytest.fail`` marker to fail a test with a certain message. The test support function will not show up in the traceback if you set the ``__tracebackhide__`` option somewhere in the helper function. Example:: テストã‹ã‚‰å‘¼ã°ã‚Œã‚‹ãƒ†ã‚¹ãƒˆãƒ˜ãƒ«ãƒ‘ー関数ãŒã‚ã‚‹ãªã‚‰ã€ç‰¹å®šãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ä»˜ãã§ãƒ†ã‚¹ãƒˆã‚’失敗ã•ã›ã‚‹ ``pytest.fail`` マーカーを使ãˆã¾ã™ã€‚ ``__tracebackhide__`` オプションをヘルパー関数内ã«ã‚»ãƒƒãƒˆã™ã‚‹ã¨ã€ãã®ãƒ†ã‚¹ãƒˆãƒ˜ãƒ«ãƒ‘ー関数ã¯ãƒˆãƒ¬ãƒ¼ã‚¹ãƒãƒƒã‚¯ã‚’表示ã—ãªããªã‚Šã¾ã™ã€‚サンプルを紹介ã—ã¾ã™:: # test_checkconfig.py ã®å†…容 import pytest def checkconfig(x): __tracebackhide__ = True if not hasattr(x, "config"): pytest.fail("not configured: %s" %(x,)) def test_something(): checkconfig(42) .. The ``__tracebackhide__`` setting influences py.test showing of tracebacks: the ``checkconfig`` function will not be shown unless the ``--fulltrace`` command line option is specified. 
Let's run our little function:: ``__tracebackhide__`` 設定ã¯ã€py.test ã®ãƒˆãƒ¬ãƒ¼ã‚¹ãƒãƒƒã‚¯è¡¨ç¤ºã«å½±éŸ¿ã‚’与ãˆã¾ã™ã€‚ ``checkconfig`` 関数ã¯ã€ ``--fulltrace`` コマンドラインオプションを指定ã—ãªã„é™ã‚Šã€ãƒˆãƒ¬ãƒ¼ã‚¹ãƒãƒƒã‚¯ã‚’表示ã—ã¾ã›ã‚“。ã“ã®å°ã•ãªé–¢æ•°ã‚’実行ã—ã¦ã¿ã¾ã—ょã†:: $ py.test -q test_checkconfig.py collecting ... collected 1 items F ================================= FAILURES ================================= ______________________________ test_something ______________________________ def test_something(): > checkconfig(42) E Failed: not configured: 42 test_checkconfig.py:8: Failed 1 failed in 0.01 seconds .. Detect if running from within a py.test run -------------------------------------------------------------- py.test ã§å®Ÿè¡Œã—ã¦ã„ã‚‹ã“ã¨ã‚’検出 -------------------------------- .. regendoc:wipe .. Usually it is a bad idea to make application code behave differently if called from a test. But if you absolutely must find out if your application code is running from a test you can do something like this:: 通常ã¯ã€ãƒ†ã‚¹ãƒˆã‹ã‚‰å‘¼ã°ã‚Œã‚‹å ´åˆã«ã‚¢ãƒ—ãƒªã‚±ãƒ¼ã‚·ãƒ§ãƒ³ã‚³ãƒ¼ãƒ‰ã®æŒ¯ã‚‹èˆžã„を分ã‘ã‚‹ã®ã¯æ‚ªã„考ãˆã§ã™ã€‚ã—ã‹ã—ã€ã‚¢ãƒ—リケーションコードãŒãƒ†ã‚¹ãƒˆã‹ã‚‰å®Ÿè¡Œã•れã¦ã„ã‚‹å ´åˆã«ã€ç¢ºå®Ÿã«è§£æ˜Žã—ãªã‘れã°ãªã‚‰ãªã„ã“ã¨ãŒã‚ã‚‹ãªã‚‰ã€æ¬¡ã®ã‚ˆã†ãªã“ã¨ãŒã§ãã¾ã™:: # conftest.py ã®å†…容 def pytest_configure(config): import sys sys._called_from_test = True def pytest_unconfigure(config): del sys._called_from_test .. and then check for the ``sys._called_from_test`` flag:: アプリケーション内㧠``sys._called_from_test`` ã¨ã„ã†ãƒ•ラグをãƒã‚§ãƒƒã‚¯ã—ã¾ã™:: if hasattr(sys, '_called_from_test'): # テスト内ã‹ã‚‰å®Ÿè¡Œæ™‚ã«å‘¼ã°ã‚Œã‚‹ else: # "普通" ã®ã¨ãã«å‘¼ã°ã‚Œã‚‹ .. accordingly in your application. It's also a good idea to use your own application module rather than ``sys`` for handling flag. フラグを処ç†ã™ã‚‹ãŸã‚ã« ``sys`` よりも独自ã®ã‚¢ãƒ—リケーションモジュールを使ã†ã®ã‚‚良ã„考ãˆã§ã™ã€‚ .. 
Adding info to test report header -------------------------------------------------------------- テストレãƒãƒ¼ãƒˆãƒ˜ãƒƒãƒ€ãƒ¼ã«æƒ…報を追加 ---------------------------------- .. regendoc:wipe .. It's easy to present extra information in a py.test run:: py.test ã®å®Ÿè¡Œæ™‚ã«è¿½åŠ ã®æƒ…報を表示ã™ã‚‹ã®ã¯ç°¡å˜ã§ã™:: # conftest.py ã®å†…容 def pytest_report_header(config): return "project deps: mylib-1.1" .. which will add the string to the test header accordingly:: ã“ã®é–¢æ•°ã¯ãƒ†ã‚¹ãƒˆãƒ˜ãƒƒãƒ€ãƒ¼ã«æ–‡å­—列を追加ã—ã¾ã™:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 project deps: mylib-1.1 collecting ... collected 0 items ============================= in 0.00 seconds ============================= .. regendoc:wipe .. You can also return a list of strings which will be considered as several lines of information. You can of course also make the amount of reporting information on e.g. the value of ``config.option.verbose`` so that you present more information appropriately:: è¤‡æ•°è¡Œã«æ¸¡ã‚‹æƒ…報を扱ã†ãªã‚‰æ–‡å­—列ã®ãƒªã‚¹ãƒˆã‚‚è¿”ã›ã¾ã™ã€‚当然レãƒãƒ¼ãƒˆã®æƒ…å ±é‡ã‚‚制御ã§ãã¾ã™ã€‚例ãˆã°ã€å¿…è¦ãªã¨ãã«æƒ…報を表示ã™ã‚‹ãŸã‚ã« ``config.option.verbose`` ã®å€¤ã§åˆ‡ã‚Šåˆ†ã‘ã¾ã™:: # conftest.py ã®å†…容 def pytest_report_header(config): if config.option.verbose > 0: return ["info1: did you know that ...", "did you?"] .. which will add info only when run with "--v":: "--v" を指定ã—ã¦å®Ÿè¡Œã—ãŸã¨ãã®ã¿è¿½åŠ ã®æƒ…å ±ãŒè¡¨ç¤ºã•れã¾ã™:: $ py.test -v =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 -- /home/hpk/venv/0/bin/python info1: did you know that ... did you? collecting ... collected 0 items ============================= in 0.00 seconds ============================= .. 
and nothing when run plainly:: 何も指定ã›ãšã«å®Ÿè¡Œã™ã‚‹ã¨ä½•も表示ã—ã¾ã›ã‚“:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 0 items ============================= in 0.00 seconds ============================= .. profiling test duration -------------------------- テスト実行ã®ãƒ—ロファイリング ---------------------------- .. regendoc:wipe .. versionadded: 2.2 .. If you have a slow running large test suite you might want to find out which tests are the slowest. Let's make an artifical test suite:: 巨大ãªãƒ†ã‚¹ãƒˆã‚¹ã‚¤ãƒ¼ãƒˆã®å®Ÿè¡Œã«æ™‚é–“ãŒã‹ã‹ã‚‹å ´åˆã€ã©ã®ãƒ†ã‚¹ãƒˆãŒæœ€ã‚‚é…ã„ã‹ã‚’調ã¹ãŸã„ã¨ããŒã‚りã¾ã™ã€‚擬似テストスイートã§è©¦ã—ã¦ã¿ã¾ã—ょã†:: # test_some_are_slow.py ã®å†…容 import time def test_funcfast(): pass def test_funcslow1(): time.sleep(0.1) def test_funcslow2(): time.sleep(0.2) .. Now we can profile which test functions execute the slowest:: 次ã«ã‚ˆã†ã«ã—ã¦ã€ã©ã®ãƒ†ã‚¹ãƒˆé–¢æ•°ãŒæœ€ã‚‚é…ã„ã‹ã‚’プロファイルã§ãã¾ã™:: $ py.test --durations=3 =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 3 items test_some_are_slow.py ... 
========================= slowest 3 test durations ========================= 0.20s call test_some_are_slow.py::test_funcslow2 0.10s call test_some_are_slow.py::test_funcslow1 0.00s setup test_some_are_slow.py::test_funcslow2 ========================= 3 passed in 0.31 seconds ========================= pytest-2.5.1/doc/ja/example/pythoncollection.py0000664000175000017500000000030712254002202021165 0ustar hpkhpk00000000000000 # run this with $ py.test --collect-only test_collectonly.py # def test_function(): pass class TestClass: def test_method(self): pass def test_anothermethod(self): pass pytest-2.5.1/doc/ja/example/assertion/0000775000175000017500000000000012254002202017225 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/example/assertion/test_setup_flow_example.py0000664000175000017500000000234212254002202024541 0ustar hpkhpk00000000000000def setup_module(module): module.TestStateFullThing.classcount = 0 class TestStateFullThing: def setup_class(cls): cls.classcount += 1 def teardown_class(cls): cls.classcount -= 1 def setup_method(self, method): self.id = eval(method.__name__[5:]) def test_42(self): assert self.classcount == 1 assert self.id == 42 def test_23(self): assert self.classcount == 1 assert self.id == 23 def teardown_module(module): assert module.TestStateFullThing.classcount == 0 """ For this example the control flow happens as follows:: import test_setup_flow_example setup_module(test_setup_flow_example) setup_class(TestStateFullThing) instance = TestStateFullThing() setup_method(instance, instance.test_42) instance.test_42() setup_method(instance, instance.test_23) instance.test_23() teardown_class(TestStateFullThing) teardown_module(test_setup_flow_example) Note that ``setup_class(TestStateFullThing)`` is called and not ``TestStateFullThing.setup_class()`` which would require you to insert ``setup_class = classmethod(setup_class)`` to make your setup function callable. 
""" pytest-2.5.1/doc/ja/example/assertion/global_testmodule_config/0000775000175000017500000000000012254002202024257 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/example/assertion/global_testmodule_config/conftest.py0000664000175000017500000000050712254002202026460 0ustar hpkhpk00000000000000import pytest, py mydir = py.path.local(__file__).dirpath() def pytest_runtest_setup(item): if isinstance(item, pytest.Function): if not item.fspath.relto(mydir): return mod = item.getparent(pytest.Module).obj if hasattr(mod, 'hello'): print ("mod.hello %r" % (mod.hello,)) pytest-2.5.1/doc/ja/example/assertion/global_testmodule_config/test_hello.py0000664000175000017500000000005412254002202026772 0ustar hpkhpk00000000000000 hello = "world" def test_func(): pass pytest-2.5.1/doc/ja/example/assertion/test_failures.py0000664000175000017500000000066312254002202022455 0ustar hpkhpk00000000000000 import py failure_demo = py.path.local(__file__).dirpath('failure_demo.py') pytest_plugins = 'pytester', def test_failure_demo_fails_properly(testdir): target = testdir.tmpdir.join(failure_demo.basename) failure_demo.copy(target) failure_demo.copy(testdir.tmpdir.join(failure_demo.basename)) result = testdir.runpytest(target) result.stdout.fnmatch_lines([ "*39 failed*" ]) assert result.ret != 0 pytest-2.5.1/doc/ja/example/assertion/failure_demo.py0000664000175000017500000001101212254002202022225 0ustar hpkhpk00000000000000from py.test import raises import py def otherfunc(a,b): assert a==b def somefunc(x,y): otherfunc(x,y) def otherfunc_multi(a,b): assert (a == b) def test_generative(param1, param2): assert param1 * 2 < param2 def pytest_generate_tests(metafunc): if 'param1' in metafunc.fixturenames: metafunc.addcall(funcargs=dict(param1=3, param2=6)) class TestFailing(object): def test_simple(self): def f(): return 42 def g(): return 43 assert f() == g() def test_simple_multiline(self): otherfunc_multi( 42, 6*9) def test_not(self): def f(): return 42 assert not f() class 
TestSpecialisedExplanations(object): def test_eq_text(self): assert 'spam' == 'eggs' def test_eq_similar_text(self): assert 'foo 1 bar' == 'foo 2 bar' def test_eq_multiline_text(self): assert 'foo\nspam\nbar' == 'foo\neggs\nbar' def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 b = '1'*100 + 'b' + '2'*100 assert a == b def test_eq_long_text_multiline(self): a = '1\n'*100 + 'a' + '2\n'*100 b = '1\n'*100 + 'b' + '2\n'*100 assert a == b def test_eq_list(self): assert [0, 1, 2] == [0, 1, 3] def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 b = [0]*100 + [2] + [3]*100 assert a == b def test_eq_dict(self): assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2} def test_eq_set(self): assert set([0, 10, 11, 12]) == set([0, 20, 21]) def test_eq_longer_list(self): assert [1,2] == [1,2,3] def test_in_list(self): assert 1 in [0, 2, 3, 4, 5] def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' assert 'foo' not in text def test_not_in_text_single(self): text = 'single foo line' assert 'foo' not in text def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 assert 'foo' not in text def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 assert 'f'*70 not in text def test_attribute(): class Foo(object): b = 1 i = Foo() assert i.b == 2 def test_attribute_instance(): class Foo(object): b = 1 assert Foo().b == 2 def test_attribute_failure(): class Foo(object): def _get_b(self): raise Exception('Failed to get attrib') b = property(_get_b) i = Foo() assert i.b == 2 def test_attribute_multiple(): class Foo(object): b = 1 class Bar(object): b = 2 assert Foo().b == Bar().b def globf(x): return x+1 class TestRaises: def test_raises(self): s = 'qwe' raises(TypeError, "int(s)") def test_raises_doesnt(self): raises(IOError, "int('3')") def test_raise(self): raise ValueError("demo error") def test_tupleerror(self): a,b = [1] def 
test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] print ("l is %r" % l) a,b = l.pop() def test_some_error(self): if namenotexi: pass def func1(self): assert 41 == 42 # thanks to Matthew Scott for this test def test_dynamic_compile_shows_nicely(): src = 'def foo():\n assert 1 == 0\n' name = 'abc-123' module = py.std.imp.new_module(name) code = py.code.compile(src, name, 'exec') py.builtin.exec_(code, module.__dict__) py.std.sys.modules[name] = module module.foo() class TestMoreErrors: def test_complex_error(self): def f(): return 44 def g(): return 43 somefunc(f(), g()) def test_z1_unpack_error(self): l = [] a,b = l def test_z2_type_error(self): l = 3 a,b = l def test_startswith(self): s = "123" g = "456" assert s.startswith(g) def test_startswith_nested(self): def f(): return "123" def g(): return "456" assert f().startswith(g()) def test_global_func(self): assert isinstance(globf(42), float) def test_instance(self): self.x = 6*7 assert self.x != 42 def test_compare(self): assert globf(10) < 5 def test_try_finally(self): x = 1 try: assert x == 0 finally: x = 0 pytest-2.5.1/doc/ja/example/pythoncollection.txt0000664000175000017500000001247512254002202021365 0ustar hpkhpk00000000000000標準的㪠(Python) テスト探索ã®å¤‰æ›´ ================================== .. Changing standard (Python) test discovery =============================================== .. Changing directory recursion ----------------------------------------------------- ディレクトリã®å†å¸°æŽ¢ç´¢ã®å¤‰æ›´ ---------------------------- .. You can set the :confval:`norecursedirs` option in an ini-file, for example your ``setup.cfg`` in the project root directory:: ini ファイル㧠:confval:`norecursedirs` オプションを設定ã§ãã¾ã™ã€‚例ãˆã°ã€ãƒ—ロジェクトã®ãƒ«ãƒ¼ãƒˆãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã«ã‚ã‚‹ ``setup.cfg`` ã«è¨­å®šã—ã¾ã™:: # setup.cfg ã®å†…容 [pytest] norecursedirs = .svn _build tmp* .. This would tell py.test to not recurse into typical subversion or sphinx-build directories or into any ``tmp`` prefixed directory. 
ã“れã¯å…¸åž‹çš„㪠subversion 㨠sphinx ã® build ディレクトリ㨠``tmp`` ã¨ã„ã†æŽ¥é ­è¾žã‚’ã‚‚ã¤ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’å†å¸°æŽ¢ç´¢ã—ãªã„設定ã§ã™ã€‚ .. _`change naming conventions`: 命åè¦å‰‡ã®å¤‰æ›´ -------------- .. Changing naming conventions ----------------------------------------------------- .. You can configure different naming conventions by setting the :confval:`python_files`, :confval:`python_classes` and :confval:`python_functions` configuration options. Example:: :confval:`python_files`, :confval:`python_classes`, :confval:`python_functions` オプションを設定ã™ã‚‹ã“ã¨ã§åˆ¥ã®å‘½åè¦å‰‡ã‚’使ã†ã“ã¨ã‚‚ã§ãã¾ã™ã€‚サンプルを紹介ã—ã¾ã™:: # setup.cfg ã®å†…容 # tox.ini ã¾ãŸã¯ pytest.init ファイルã§ã‚‚定義ã§ãã‚‹ [pytest] python_files=check_*.py python_classes=Check python_functions=check .. This would make py.test look for ``check_`` prefixes in Python filenames, ``Check`` prefixes in classes and ``check`` prefixes in functions and classes. For example, if we have:: ã“ã®è¨­å®šã¯ Python ファイルåã« ``check_`` 〠クラスåã« ``Check`` ã€é–¢æ•°åã« ``check`` ã¨ã„ã†æŽ¥é ­è¾žã‚’ py.test ãŒæŽ¢ã™ã‚ˆã†ã«ã—ã¾ã™ã€‚例ãˆã°ã€æ¬¡ã®ã‚ˆã†ãªãƒ•ァイルã§ã™:: # check_myapp.py ã®å†…容 class CheckMyApp: def check_simple(self): pass def check_complex(self): pass .. then the test collection looks like this:: ãƒ†ã‚¹ãƒˆã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ã¯æ¬¡ã®ã‚ˆã†ã«ãªã‚Šã¾ã™:: $ py.test --collect-only =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 2 items ============================= in 0.00 seconds ============================= .. Interpreting cmdline arguments as Python packages ----------------------------------------------------- Python パッケージã¨ã—ã¦ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³å¼•数を解釈 ----------------------------------------------- .. You can use the ``--pyargs`` option to make py.test try interpreting arguments as python package names, deriving their file system path and then running the test. 
For example if you have unittest2 installed you can type:: py.test ãŒãƒ•ァイルシステムã®ãƒ‘スã‹ã‚‰ Python パッケージåã¨ã—ã¦å¼•数を解釈ã™ã‚‹ã‚ˆã†ã« ``--pyargs`` オプションを使ãˆã¾ã™ã€‚例ãˆã°ã€unittest2 をインストール済ã¿ãªã‚‰ã€æ¬¡ã®ã‚ˆã†ã«æŒ‡å®šã§ãã¾ã™:: py.test --pyargs unittest2.test.test_skipping -q .. which would run the respective test module. Like with other options, through an ini-file and the :confval:`addopts` option you can make this change more permanently:: ãれãžã‚Œã®ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’実行ã—ã¾ã™ã€‚ãã®ä»–ã®ã‚ªãƒ—ションã¨åŒæ§˜ã« ini ファイル㨠:confval:`addopts` オプションã«ã‚ˆã‚Šã€ã“ã®å¤‰æ›´ã‚’永続化ã§ãã¾ã™:: # pytest.ini ã®å†…容 [pytest] addopts = --pyargs .. Now a simple invocation of ``py.test NAME`` will check if NAME exists as an importable package/module and otherwise treat it as a filesystem path. å˜ç´”ã« ``py.test NAME`` を実行ã™ã‚‹ã¨ã€NAME ãŒã‚¤ãƒ³ãƒãƒ¼ãƒˆå¯èƒ½ãªãƒ‘ッケージï¼ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã¨ã—ã¦å­˜åœ¨ã—ã¦ã„ã‚‹ã‹ã©ã†ã‹ã‚’ãƒã‚§ãƒƒã‚¯ã—ã¾ã™ã€‚存在ã—ãªã„å ´åˆã€ãƒ•ァイルシステム上ã®ãƒ‘スã¨ã—㦠NAME を扱ã„ã¾ã™ã€‚ .. Finding out what is collected ----------------------------------------------- ã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ã®æŽ¢ç´¢ ------------------ .. You can always peek at the collection tree without running tests like this:: 次ã®ã‚ˆã†ã«ãƒ†ã‚¹ãƒˆã‚’実行ã›ãšã«ã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ãƒ„リーをピークã§ãã¾ã™:: . $ py.test --collect-only pythoncollection.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 3 items ============================= in 0.00 seconds ============================= pytest-2.5.1/doc/ja/example/nonpython.txt0000664000175000017500000001151712254002202020020 0ustar hpkhpk00000000000000 .. _`non-python tests`: Python 以外ã®ãƒ†ã‚¹ãƒˆã‚’扱ㆠ========================= .. Working with non-python tests ==================================================== .. _`yaml plugin`: Yaml ファイルã§ãƒ†ã‚¹ãƒˆã‚’指定ã™ã‚‹åŸºæœ¬çš„ãªã‚µãƒ³ãƒ—ル ----------------------------------------------- .. 
A basic example for specifying tests in Yaml files -------------------------------------------------------------- .. _`pytest-yamlwsgi`: http://bitbucket.org/aafshar/pytest-yamlwsgi/src/tip/pytest_yamlwsgi.py .. _`PyYAML`: http://pypi.python.org/pypi/PyYAML/ .. Here is an example ``conftest.py`` (extracted from Ali Afshnars special purpose `pytest-yamlwsgi`_ plugin). This ``conftest.py`` will collect ``test*.yml`` files and will execute the yaml-formatted content as custom tests: ``conftest.py`` (Ali Afshnars ã®ç‰¹æ®Šç”¨é€”ã® `pytest-yamlwsgi`_ プラグインã‹ã‚‰å¼•用) ã®ã‚µãƒ³ãƒ—ルを紹介ã—ã¾ã™ã€‚ã“ã® ``conftest.py`` 㯠``test*.yml`` ファイルを探ã—ã¦ãã¦ã€yaml フォーマットã®ã‚³ãƒ³ãƒ†ãƒ³ãƒ„をカスタムテストã¨ã—ã¦å®Ÿè¡Œã—ã¾ã™: .. include:: nonpython/conftest.py :literal: .. You can create a simple example file: ç°¡å˜ãªã‚µãƒ³ãƒ—ルファイルを作æˆã—ã¾ã™: .. include:: nonpython/test_simple.yml :literal: .. and if you installed `PyYAML`_ or a compatible YAML-parser you can now execute the test specification:: `PyYAML`_ ã‹ã€äº’æ›æ€§ã®ã‚ã‚‹ YAML パーサーをインストール済ã¿ãªã‚‰ã€ãã®ãƒ†ã‚¹ãƒˆä»•様を実行ã§ãã¾ã™:: nonpython $ py.test test_simple.yml =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 2 items test_simple.yml .F ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. ==================== 1 failed, 1 passed in 0.06 seconds ==================== .. You get one dot for the passing ``sub1: sub1`` check and one failure. Obviously in the above ``conftest.py`` you'll want to implement a more interesting interpretation of the yaml-values. You can easily write your own domain specific testing language this way. 
``sub1: sub1`` ã¯æˆåŠŸã—ã¦ãƒ‰ãƒƒãƒˆã‚’1ã¤è¡¨ç¤ºã—ã€ã‚‚ã†1ã¤ã¯å¤±æ•—ã—ã¾ã™ã€‚上述ã—㟠``conftest.py`` ã¯è¨€ã†ã¾ã§ã‚‚ãªãå˜ç´”ãªã®ã§ã€ã‚‚ã£ã¨ãŠã‚‚ã—ã‚ã„ yaml 値を解釈ã™ã‚‹ã‚µãƒ³ãƒ—ルを実装ã—ãŸããªã‚‹ã§ã—ょã†ã€‚ã“ã®ã‚ˆã†ã«ç‹¬è‡ªã®ãƒ‰ãƒ¡ã‚¤ãƒ³å›ºæœ‰ãƒ†ã‚¹ãƒˆè¨€èªžã‚’ç°¡å˜ã«è¨˜è¿°ã§ãã¾ã™ã€‚ .. note:: .. ``repr_failure(excinfo)`` is called for representing test failures. If you create custom collection nodes you can return an error representation string of your choice. It will be reported as a (red) string. ``repr_failure(excinfo)`` ã¯ãƒ†ã‚¹ãƒˆã®å¤±æ•—を表ç¾ã™ã‚‹ãŸã‚ã«å‘¼ã°ã‚Œã¾ã™ã€‚カスタムコレクションã®ãƒŽãƒ¼ãƒ‰ã‚’作æˆã™ã‚‹å ´åˆã€å¥½ããªã‚¨ãƒ©ãƒ¼ã‚’表ç¾ã™ã‚‹æ–‡å­—列を返ã›ã¾ã™ã€‚ãれ㯠(赤ã„) 文字列ã§è¡¨ç¤ºã•れã¾ã™ã€‚ .. ``reportinfo()`` is used for representing the test location and is also consulted when reporting in ``verbose`` mode:: ``reportinfo()`` ã¯ãƒ†ã‚¹ãƒˆã®ä½ç½®ã‚’表ç¾ã—ãŸã‚Šã€ ``verbose`` モードã§ã¯ãƒ¬ãƒãƒ¼ãƒˆæ™‚ã«ã‚‚使ã‚れã¾ã™:: nonpython $ py.test -v =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 -- /home/hpk/venv/0/bin/python collecting ... collected 2 items test_simple.yml:1: usecase: ok PASSED test_simple.yml:1: usecase: hello FAILED ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. ==================== 1 failed, 1 passed in 0.06 seconds ==================== .. While developing your custom test collection and execution it's also interesting to just look at the collection tree:: カスタムテストコレクションや実行処ç†ã®é–‹ç™ºä¸­ã€ãã®ã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ãƒ„リーをã¡ã‚‡ã£ã¨è¦‹ã‚‹ã®ã‚‚ãŠã‚‚ã—ã‚ã„ã§ã™:: nonpython $ py.test --collect-only =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... 
collected 2 items ============================= in 0.07 seconds ============================= pytest-2.5.1/doc/ja/xunit_setup.txt0000664000175000017500000001100212254002202016705 0ustar hpkhpk00000000000000.. _xunitsetup: =================================================== æ‹¡å¼µã•れ㟠xUnit スタイルã®ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—フィクスãƒãƒ£ =================================================== .. ==================================== Extended xUnit style setup fixtures ==================================== .. _`funcargs`: funcargs.html .. _`test parametrization`: funcargs.html#parametrizing-tests .. _`unittest plugin`: plugin/unittest.html .. _`xUnit`: http://en.wikipedia.org/wiki/XUnit .. Python, Java and many other languages support xUnit_ style testing. This typically involves the call of a ``setup`` ("fixture") method before running a test function and ``teardown`` after it has finished. ``py.test`` supports a more fine-grained model of setup/teardown handling by optionally calling per-module and per-class hooks. Pythonã€Java ãŠã‚ˆã³ä»–ã®å¤šãã®è¨€èªžã¯ xUnit_ スタイルã®ãƒ†ã‚¹ãƒˆã«å¯¾å¿œã—ã¦ã„ã¾ã™ã€‚ã“れã¯ãƒ†ã‚¹ãƒˆé–¢æ•°ã®å®Ÿè¡Œå‰ã« ``setup`` ("フィクスãƒãƒ£") メソッドをã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã®å®Ÿè¡Œå¾Œã« ``teardown`` メソッドを呼ã³å‡ºã™å…¸åž‹çš„ãªã‚¹ã‚¿ã‚¤ãƒ«ã§ã™ã€‚ ``py.test`` ã¯ã€ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«å˜ä½ã‚„クラスå˜ä½ã®ãƒ•ックを必è¦ã«å¿œã˜ã¦å‘¼ã³å‡ºã—ã¦å‡¦ç†ã™ã‚‹ã€ã‚ˆã‚Šç´°åˆ†åŒ–ã•れ㟠setup/teardown ã®ãƒ¢ãƒ‡ãƒ«ã«å¯¾å¿œã—ã¦ã„ã¾ã™ã€‚ .. Module level setup/teardown ============================================= モジュールレベル㮠setup/teardown ================================= .. 
If you have multiple test functions and test classes in a single module you can optionally implement the following fixture methods which will usually be called once for all the functions:: 1ã¤ã®ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã«è¤‡æ•°ã®ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚„テストクラスãŒã‚ã‚‹å ´åˆã€å¿…è¦ã«å¿œã˜ã¦ã€å…¨ã¦ã®é–¢æ•°ã«å¯¾ã—ã¦é€šå¸¸1度ã ã‘呼ã³å‡ºã•れるフィクスãƒãƒ£ãƒ¡ã‚½ãƒƒãƒ‰ã‚’実装ã§ãã¾ã™:: def setup_module(module): """ モジュールã®å®Ÿè¡Œã«é–¢ã—ã¦ä»»æ„ã®çŠ¶æ…‹ã‚’ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—ã™ã‚‹ """ def teardown_module(module): """ setup_module ã§äº‹å‰ã«ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—ã—ãŸçŠ¶æ…‹ã‚’è§£ä½“ã™ã‚‹ """ .. Class level setup/teardown ============================================= クラスレベル㮠setup/teardown ============================= .. Similarly, the following methods are called at class level before and after all test methods of the class are called:: åŒæ§˜ã«ã€ã‚¯ãƒ©ã‚¹ã®å…¨ã¦ã®ãƒ†ã‚¹ãƒˆãƒ¡ã‚½ãƒƒãƒ‰ãŒå‘¼ã³å‡ºã•れるå‰å¾Œã«ã€ã‚¯ãƒ©ã‚¹ãƒ¬ãƒ™ãƒ«ã§æ¬¡ã®ãƒ¡ã‚½ãƒƒãƒ‰ãŒå‘¼ã°ã‚Œã¾ã™:: @classmethod def setup_class(cls): """ (通常ã¯ãƒ†ã‚¹ãƒˆã‚’å«ã‚€) クラスã®å®Ÿè¡Œã«é–¢ã—ã¦ä»»æ„ã®çŠ¶æ…‹ã‚’ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—ã™ã‚‹ """ @classmethod def teardown_class(cls): """ setup_class ã§äº‹å‰ã«ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—ã—ãŸçŠ¶æ…‹ã‚’è§£ä½“ã™ã‚‹ """ .. Method and function level setup/teardown ============================================= メソッドや関数レベル㮠setup/teardown ===================================== .. Similarly, the following methods are called around each method invocation:: åŒæ§˜ã«ã€ãれãžã‚Œã®ãƒ¡ã‚½ãƒƒãƒ‰å‘¼ã³å‡ºã—ã®å‰å¾Œã§æ¬¡ã®ãƒ¡ã‚½ãƒƒãƒ‰ãŒå‘¼ã°ã‚Œã¾ã™:: def setup_method(self, method): """ クラス内ã®ãƒ¡ã‚½ãƒƒãƒ‰ã®å®Ÿè¡Œã«é–¢ã—ã¦ä»»æ„ã®çŠ¶æ…‹ã‚’ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—ã™ã‚‹ setup_method ã¯ã‚¯ãƒ©ã‚¹ã®ãƒ†ã‚¹ãƒˆãƒ¡ã‚½ãƒƒãƒ‰å˜ä½ã§å®Ÿè¡Œã•れる """ def teardown_method(self, method): """ setup_method ã§äº‹å‰ã«ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—ã—ãŸçŠ¶æ…‹ã‚’è§£ä½“ã™ã‚‹ """ .. 
If you would rather define test functions directly at module level you can also use the following functions to implement fixtures:: モジュールレベルã§ç›´æŽ¥çš„ã«ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’定義ã—ãŸã„ãªã‚‰ã€æ¬¡ã®é–¢æ•°ã‚‚フィクスãƒãƒ£ã‚’実装ã™ã‚‹ã®ã«ä½¿ãˆã¾ã™:: def setup_function(function): """ 関数ã®å®Ÿè¡Œã«é–¢ã—ã¦ä»»æ„ã®çŠ¶æ…‹ã‚’ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—ã™ã‚‹ モジュール内ã®é–¢æ•°å˜ä½ã§å®Ÿè¡Œã•れる """ def teardown_function(function): """ setup_function ã§äº‹å‰ã«ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—ã—ãŸçŠ¶æ…‹ã‚’è§£ä½“ã™ã‚‹ """ .. Note that it is possible for setup/teardown pairs to be invoked multiple times per testing process. テストプロセスã«ã¤ã複数回実行ã•れる setup/teardown ã®çµ„ã¿åˆã‚ã›ã«ä½¿ãˆã‚‹ã“ã¨ã‚‚覚ãˆã¦ãŠã„ã¦ãã ã•ã„。 .. _`unittest.py module`: http://docs.python.org/library/unittest.html pytest-2.5.1/doc/ja/funcargs.txt0000664000175000017500000003667412254002202016154 0ustar hpkhpk00000000000000.. ============================================================== Injecting objects into test functions (funcargs) ============================================================== ========================================== テスト関数 (funcargs) ã«ã‚ªãƒ–ジェクトを注入 ========================================== .. currentmodule:: _pytest.python .. _`funcargs`: .. _`funcarg mechanism`: 関数ã®å¼•数を使ã£ãŸä¾å­˜æ€§ã®æ³¨å…¥ ============================== .. Dependency injection through function arguments ================================================= .. py.test lets you inject objects into test functions and precisely control their life cycle in relation to the test execution. It is also possible to run a test function multiple times with different objects. py.test ã¯ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã«ã‚ªãƒ–ジェクトを注入ã—ã€ãƒ†ã‚¹ãƒˆã®å®Ÿè¡Œã«é–¢é€£ä»˜ã‘ã¦ãã®ãƒ©ã‚¤ãƒ•サイクルを細ã‹ã制御ã§ãã¾ã™ã€‚ã•らã«åˆ¥ã®ã‚ªãƒ–ジェクトã§ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’複数回実行ã™ã‚‹ã“ã¨ã‚‚ã§ãã¾ã™ã€‚ .. The basic mechanism for injecting objects is also called the *funcarg mechanism* because objects are ultimately injected by calling a test function with it as an argument. 
Unlike the classical xUnit approach *funcargs* relate more to `Dependency Injection`_ because they help to de-couple test code from objects required for them to execute. オブジェクトを注入ã™ã‚‹ãŸã‚ã®åŸºæœ¬çš„ãªä»•組ã¿ã¯ *funcarg 機構* ã¨ã‚‚呼ã°ã‚Œã¾ã™ã€‚ã‚る引数ã«å¯¾ã—ã¦ã€ãã®å¼•æ•°ã‚’å—ã‘å–るテスト関数ãŒå‘¼ã°ã‚Œã‚‹ã“ã¨ã§æœ€çµ‚çš„ã«ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆãŒæ³¨å…¥ã•れるã‹ã‚‰ã§ã™ã€‚å¤å…¸çš„㪠xUnit ã®ã‚„り方ã¨ã¯ç•°ãªã‚Š *funcargs* 㯠`ä¾å­˜æ€§ã®æ³¨å…¥`_ ã«å¯†æŽ¥ã«é–¢é€£ã—ãŸã‚‚ã®ã§ã™ã€‚ãã®æ ¹æ‹ ã¯ã€ãƒ†ã‚¹ãƒˆã‚³ãƒ¼ãƒ‰ã‚’実行ã™ã‚‹ãŸã‚ã«å¿…è¦ãªã‚ªãƒ–ジェクトã‹ã‚‰ãƒ†ã‚¹ãƒˆã‚³ãƒ¼ãƒ‰ã‚’分離ã™ã‚‹ã®ã«å½¹ç«‹ã¤ã‹ã‚‰ã§ã™ã€‚ .. _`Dependency injection`: http://en.wikipedia.org/wiki/Dependency_injection .. _`ä¾å­˜æ€§ã®æ³¨å…¥`: http://en.wikipedia.org/wiki/Dependency_injection .. To create a value with which to call a test function a factory function is called which gets full access to the test function context and can register finalizers or invoke lifecycle-caching helpers. The factory can be implemented in same test class or test module, or in a per-directory ``conftest.py`` file or even in an external plugin. This allows full de-coupling of test code and objects needed for test execution. ãƒ†ã‚¹ãƒˆé–¢æ•°ã¸æ¸¡ã•れる値を作æˆã™ã‚‹ãŸã‚ã«ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã®ã‚³ãƒ³ãƒ†ã‚­ã‚¹ãƒˆã«å¯¾ã—ã¦ãƒ•ルアクセスをもã£ãŸãƒ•ァクトリー関数ãŒå‘¼ã°ã‚Œã¾ã™ã€‚ãã—ã¦ã€ãƒ•ァイナライザーを登録ã—ãŸã‚Šã€ãƒ©ã‚¤ãƒ•サイクルキャッシュヘルパーを実行ã—ã¾ã™ã€‚ファクトリー関数ã¯ã€åŒã˜ãƒ†ã‚¹ãƒˆã‚¯ãƒ©ã‚¹ã‹ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã€ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªæ¯Žã® ``conftest.py`` ファイルã€å¤–部プラグインã§ã‚ã‚ã†ã¨ã€ãã®ã„ãšã‚Œã§ã‚‚実装ã§ãã¾ã™ã€‚ã“れã«ã‚ˆã‚Šã€ãƒ†ã‚¹ãƒˆã®å®Ÿè¡Œã«å¿…è¦ãªãƒ†ã‚¹ãƒˆã‚³ãƒ¼ãƒ‰ã¨ã‚ªãƒ–ジェクトを完全ã«åˆ†é›¢ã§ãã¾ã™ã€‚ .. A test function may be invoked multiple times in which case we speak of :ref:`parametrized testing `. This can be very useful if you want to test e.g. against different database backends or with multiple numerical arguments sets and want to reuse the same set of test functions. 
テスト関数ã¯ã€ :ref:`パラメーターテスト ` ã§èª¬æ˜Žã—ãŸã‚ˆã†ãªã‚±ãƒ¼ã‚¹ãªã‚‰è¤‡æ•°å›žå‘¼ã³å‡ºã™ã“ã¨ã‚‚ã‚りã¾ã™ã€‚ã“れã¯ã€ä¾‹ãˆã°ã€åˆ¥ã€…ã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã®ãƒãƒƒã‚¯ã‚¨ãƒ³ãƒ‰ã€ã¾ãŸã¯è¤‡æ•°ã®æ•°å€¤ã®å¼•数セットをテストã—ãŸã„ã¨ãã‚„ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã®åŒã˜ã‚»ãƒƒãƒˆã‚’å†åˆ©ç”¨ã—ãŸã„ã¨ã„ã£ãŸã¨ãã«ã¨ã¦ã‚‚便利ã§ã™ã€‚ .. py.test comes with :ref:`builtinfuncargs` and there are some refined usages in the examples section. py.test ã«ã¯ :ref:`builtinfuncargs` ãŒä»˜å±žã—ã¦ã„ã¦ã€ãã®ã‚µãƒ³ãƒ—ルを紹介ã™ã‚‹ç¯€ã«æ´—ç·´ã•れãŸåˆ©ç”¨æ–¹æ³•ãŒã‚りã¾ã™ã€‚ .. _funcarg: åŸºæœ¬çš„ãªæ³¨å…¥ã®ä¾‹ ---------------- .. Basic injection example -------------------------------- .. Let's look at a simple self-contained test module:: ç°¡å˜ãªè‡ªå·±å®Œçµåž‹ã®ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’見ã¦ã¿ã¾ã—ょã†:: # ./test_simplefactory.py ã®å†…容 def pytest_funcarg__myfuncarg(request): return 42 def test_function(myfuncarg): assert myfuncarg == 17 .. This test function needs an injected object named ``myfuncarg``. py.test will discover and call the factory named ``pytest_funcarg__myfuncarg`` within the same module in this case. ã“ã®ãƒ†ã‚¹ãƒˆé–¢æ•°ã¯ ``myfuncarg`` ã¨ã„ã†åå‰ã®ã‚ªãƒ–ジェクトã¸ã®æ³¨å…¥ã‚’å¿…è¦ã¨ã—ã¾ã™ã€‚ã“ã®å ´åˆ py.test ã¯ã€åŒã˜ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«å†…ã® ``pytest_funcarg__myfuncarg`` ã¨ã„ã†ãƒ•ァクトリー関数を見ã¤ã‘ã¦å‘¼ã³å‡ºã—ã¾ã™ã€‚ .. Running the test looks like this:: 次ã®ã‚ˆã†ã«ãƒ†ã‚¹ãƒˆãŒå®Ÿè¡Œã•れã¾ã™:: $ py.test test_simplefactory.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 1 items test_simplefactory.py F ================================= FAILURES ================================= ______________________________ test_function _______________________________ myfuncarg = 42 def test_function(myfuncarg): > assert myfuncarg == 17 E assert 42 == 17 test_simplefactory.py:5: AssertionError ========================= 1 failed in 0.01 seconds ========================= .. 
This means that indeed the test function was called with a ``myfuncarg`` argument value of ``42`` and the assert fails. Here is how py.test comes to call the test function this way: ã“れã¯å®Ÿéš›ã«ãƒ†ã‚¹ãƒˆé–¢æ•°ãŒ ``myfuncarg`` 引数ã®å€¤ãŒ ``42`` ã§å‘¼ã³å‡ºã•れã¦ã€ãã®ã‚¢ã‚µãƒ¼ãƒˆã«å¤±æ•—ã—ã¾ã™ã€‚py.test ãŒã©ã†ã„ã£ãŸæ‰‹é †ã§ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’呼ã³å‡ºã™ã‹ã‚’説明ã—ã¾ã™: .. 1. py.test :ref:`finds ` the ``test_function`` because of the ``test_`` prefix. The test function needs a function argument named ``myfuncarg``. A matching factory function is discovered by looking for the name ``pytest_funcarg__myfuncarg``. 1. py.test 㯠``test_`` ã¨ã„ã†æŽ¥é ­è¾žã‚’ã‚‚ã¤ ``test_function`` ã‚’ :ref:`探索ã—ã¾ã™ ` 。テスト関数㯠``myfuncarg`` ã¨ã„ã†é–¢æ•°ã®å¼•æ•°ã‚’å¿…è¦ã¨ã—ã¾ã™ã€‚ ``pytest_funcarg__myfuncarg`` ã¨ã„ã†åå‰ã‚’調ã¹ã¦ä¸€è‡´ã™ã‚‹ãƒ•ã‚¡ã‚¯ãƒˆãƒªãƒ¼é–¢æ•°ãŒæ¤œå‡ºã•れã¾ã™ã€‚ .. 2. ``pytest_funcarg__myfuncarg(request)`` is called and returns the value for ``myfuncarg``. 2. ``pytest_funcarg__myfuncarg(request)`` ãŒå‘¼ã³å‡ºã•れ㦠``myfuncarg`` ã®å€¤ã‚’è¿”ã—ã¾ã™ã€‚ .. 3. the test function can now be called: ``test_function(42)``. This results in the above exception because of the assertion mismatch. 3. ãã®ãƒ†ã‚¹ãƒˆé–¢æ•°ã¯ ``test_function(42)`` ã¨ã—ã¦å‘¼ã³å‡ºã•れã¾ã™ã€‚ã“ã®å®Ÿè¡Œçµæžœã¯ã€ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ãŒä¸ä¸€è‡´ãªã®ã§ä¸Šè¿°ã—ãŸä¾‹å¤–を発生ã•ã›ã¾ã™ã€‚ .. Note that if you misspell a function argument or want to use one that isn't available, you'll see an error with a list of available function arguments. 関数ã®å¼•æ•°ã«èª¤å­—ãŒã‚ã£ãŸã‚Šã€åˆ©ç”¨ã§ããªã„ã‚‚ã®ã‚’使ãŠã†ã¨ã—ãŸã‚‰ã€åˆ©ç”¨ã§ãる関数ã®å¼•æ•°ã®ä¸€è¦§ã¨å…±ã«ã‚¨ãƒ©ãƒ¼ãŒè¡¨ç¤ºã•れるã®ã§æ³¨æ„ã—ã¦ãã ã•ã„。 .. You can always issue:: ã„ã¤ã§ã‚‚次ã®ã‚ˆã†ã«ã—ã¦:: py.test --fixtures test_simplefactory.py .. to see available function arguments (which you can also think of as "resources"). 利用ã§ãる関数ã®å¼•æ•° ("リソース" ã¨ã‚‚見ãªã›ã‚‹) を調ã¹ã‚‰ã‚Œã¾ã™ã€‚ .. _`parametrizing tests, generalized`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/ .. 
_`blog post about the monkeypatch funcarg`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/ .. _`xUnit style`: xunit_setup.html .. _`fixture function`: .. _factory: funcarg **request** オブジェクト ================================ .. The funcarg **request** object ============================================= .. Each fixture function receives a **request** object tied to a specific test function call. A request object is passed to a fixture function and provides access to test configuration and context: funcarg ファクトリー関数ã¯ã€ç‰¹åˆ¥ãªãƒ†ã‚¹ãƒˆé–¢æ•°å‘¼ã³å‡ºã—ã«é–¢é€£ä»˜ã‘られ㟠**request** オブジェクトをå—ã‘å–りã¾ã™ã€‚request オブジェクト㯠funcarg ãƒ•ã‚¡ã‚¯ãƒˆãƒªãƒ¼ã¸æ¸¡ã•れã¦ã€ãƒ†ã‚¹ãƒˆè¨­å®šã¨ã‚³ãƒ³ãƒ†ã‚­ã‚¹ãƒˆã¸ã®ã‚¢ã‚¯ã‚»ã‚¹ã‚’æä¾›ã—ã¾ã™: .. autoclass:: _pytest.python.FixtureRequest() :members: function,cls,module,keywords,config .. _`useful caching and finalization helpers`: .. automethod:: FixtureRequest.addfinalizer .. automethod:: FixtureRequest.cached_setup .. automethod:: FixtureRequest.applymarker .. automethod:: FixtureRequest.getfuncargvalue .. _`test generators`: .. _`parametrizing-tests`: .. _`parametrized test functions`: パラメーター化ã—ãŸãƒ†ã‚¹ãƒˆé–¢æ•°ã®è¤‡æ•°å‘¼ã³å‡ºã— ========================================== .. Parametrizing multiple calls to a test function =========================================================== .. You can parametrize multiple runs of the same test function by adding new test function calls with different function argument values. Let's look at a simple self-contained example: 別ã®é–¢æ•°ã®å¼•æ•°ã®å€¤ã‚’å–ã£ã¦å‘¼ã³å‡ºã™æ–°ãŸãªãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’追加ã™ã‚‹ã“ã¨ã§ã€åŒã˜ãƒ†ã‚¹ãƒˆé–¢æ•°ã«å¯¾ã—ã¦è¤‡æ•°å›žã®å®Ÿè¡Œã‚’パラメーター化ã—ã¦å®Ÿè¡Œã§ãã¾ã™ã€‚ç°¡å˜ãªè‡ªå·±å®Œçµåž‹ã®ã‚µãƒ³ãƒ—ルコードを見ã¦ã¿ã¾ã—ょã†: .. Basic generated test example ---------------------------- テストを生æˆã™ã‚‹åŸºæœ¬çš„ãªä¾‹ -------------------------- .. 
Let's consider a test module which uses the ``pytest_generate_tests`` hook to generate several calls to the same test function:: åŒã˜ãƒ†ã‚¹ãƒˆé–¢æ•°ã«å¯¾ã™ã‚‹è¤‡æ•°å›žå‘¼ã³å‡ºã—ã«ç”Ÿæˆã™ã‚‹ ``pytest_generate_tests`` フックを使ã†ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’見ã¦ã¿ã¾ã—ょã†:: # test_example.py ã®å†…容 def pytest_generate_tests(metafunc): if "numiter" in metafunc.fixturenames: metafunc.parametrize("numiter", range(10)) def test_func(numiter): assert numiter < 9 .. Running this will generate ten invocations of ``test_func`` passing in each of the items in the list of ``range(10)``:: ã“ã®ã‚µãƒ³ãƒ—ルコードを実行ã™ã‚‹ã¨ ``range(10)`` ã®ãƒªã‚¹ãƒˆã®è¦ç´ ã‚’1ã¤ãšã¤å¼•æ•°ã«æ¸¡ã™ ``test_func`` ã‚’10回実行ã—ã¾ã™:: $ py.test test_example.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 10 items test_example.py .........F ================================= FAILURES ================================= _______________________________ test_func[9] _______________________________ numiter = 9 def test_func(numiter): > assert numiter < 9 E assert 9 < 9 test_example.py:6: AssertionError ==================== 1 failed, 9 passed in 0.02 seconds ==================== .. Obviously, only when ``numiter`` has the value of ``9`` does the test fail. Note that the ``pytest_generate_tests(metafunc)`` hook is called during the test collection phase which is separate from the actual test running. Let's just look at what is collected:: 分ã‹ã‚Šã‚„ã™ã„よã†ã« ``numiter`` ã®å€¤ãŒ ``9`` ã®ã¨ãã®ã¿ãƒ†ã‚¹ãƒˆãŒå¤±æ•—ã—ã¾ã™ã€‚ ``pytest_generate_tests(metafunc)`` フックã¯ã€å®Ÿéš›ã«ãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹ã¨ãã¨ã¯é•ã†ãƒ•ェーズã®ã€ãƒ†ã‚¹ãƒˆã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ã§å‘¼ã°ã‚Œã‚‹ã“ã¨ã«æ³¨æ„ã—ã¦ãã ã•ã„。ã§ã¯ã€ãƒ†ã‚¹ãƒˆã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ãŒã©ã†ãªã‚‹ã‹ã‚’ã¡ã‚‡ã£ã¨è¦‹ã¦ã¿ã¾ã—ょã†:: $ py.test --collect-only test_example.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... 
collected 10 items ============================= in 0.00 seconds ============================= .. If you want to select only the run with the value ``7`` you could do:: テスト実行時㫠``7`` ã®å€¤ãŒæ¸¡ã•れるã¨ãã ã‘実行ã—ãŸã„å ´åˆã¯æ¬¡ã®ã‚ˆã†ã«ã—ã¦è¡Œã„ã¾ã™:: $ py.test -v -k 7 test_example.py # ã¾ãŸã¯ -k test_func[7] =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 -- /home/hpk/venv/0/bin/python collecting ... collected 10 items test_example.py:5: test_func[7] PASSED ======================= 9 tests deselected by '-k7' ======================== ================== 1 passed, 9 deselected in 0.01 seconds ================== .. You might want to look at :ref:`more parametrization examples `. :ref:`ã•らã«ãƒ‘ラメーターテストã®ã‚µãƒ³ãƒ—ル ` を見ãŸããªã‚Šã¾ã™ã­ã€‚ .. _`metafunc object`: **metafunc** オブジェクト ------------------------- .. The **metafunc** object ------------------------------------------- .. metafunc objects are passed to the ``pytest_generate_tests`` hook. They help to inspect a testfunction and to generate tests according to test configuration or values specified in the class or module where a test function is defined: metafunc オブジェクト㯠``pytest_generate_tests`` ãƒ•ãƒƒã‚¯ã¸æ¸¡ã•れã¾ã™ã€‚ã“れã¯ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’検査ã—ãŸã‚Šã€ãƒ†ã‚¹ãƒˆè¨­å®šã¾ãŸã¯ãƒ†ã‚¹ãƒˆé–¢æ•°ãŒå®šç¾©ã•れã¦ã„ã‚‹ã‚¯ãƒ©ã‚¹ã‚„ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã§æŒ‡å®šã•れãŸå€¤ã‚’å–るテストを生æˆã™ã‚‹ã®ã«å½¹ç«‹ã¡ã¾ã™: .. ``metafunc.fixturenames``: set of required function arguments for given function ``metafunc.fixturenames``: ãƒ†ã‚¹ãƒˆé–¢æ•°ã¸æ¸¡ã•れる引数セット .. ``metafunc.function``: underlying python test function ``metafunc.function``: 対象ã¨ãªã‚‹ Python ã®ãƒ†ã‚¹ãƒˆé–¢æ•° .. ``metafunc.cls``: class object where the test function is defined in or None. ``metafunc.cls``: テスト関数ãŒå®šç¾©ã•れã¦ã„ã‚‹ã¨ã“ã‚ã®ã‚¯ãƒ©ã‚¹ã‚ªãƒ–ジェクトã€ã¾ãŸã¯ None .. ``metafunc.module``: the module object where the test function is defined in. 
``metafunc.module``: テスト関数ãŒå®šç¾©ã•れã¦ã„ã‚‹ã¨ã“ã‚ã®ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚ªãƒ–ジェクト .. ``metafunc.config``: access to command line opts and general config ``metafunc.config``: ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ã‚·ãƒ§ãƒ³ã¨æ±Žç”¨çš„ãªè¨­å®šã‚ªãƒ–ジェクト .. automethod:: Metafunc.parametrize .. automethod:: Metafunc.addcall(funcargs=None,id=_notexists,param=_notexists) pytest-2.5.1/doc/ja/conf.py0000664000175000017500000002210012254002202015055 0ustar hpkhpk00000000000000# -*- coding: utf-8 -*- # # pytest documentation build configuration file, created by # sphinx-quickstart on Fri Oct 8 17:54:28 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.txt' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General information about the project. 
project = u'pytest' copyright = u'2011, holger krekel et alii' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. # The short X.Y version. version = release = "2.2.4.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. language = "ja" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['links.inc', '_build', 'naming20.txt', 'test/*', 'example/attic.txt', ] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinxdoc' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = None # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = "pytest-%s" % release # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} #html_sidebars = {'index': 'indexsidebar.html'} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} #html_additional_pages = {'index': 'index.html'} # If false, no module index is generated. html_domain_indices = True # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
#html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'pytestdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('contents', 'pytest.tex', u'pytest Documentation', u'holger krekel et alii', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. latex_domain_indices = False # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('usage', 'pytest', u'pytest usage', [u'holger krekel at merlinux eu'], 1) ] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. 
epub_title = u'pytest' epub_author = u'holger krekel at merlinux eu' epub_publisher = u'holger krekel at merlinux eu' epub_copyright = u'2011, holger krekel et alii' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # -- Options for texinfo output ------------------------------------------------ texinfo_documents = [ (master_doc, 'pytest', 'pytest Documentation', ('Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*' 'Floris Bruynooghe@*others'), 'pytest', 'simple powerful testing with Pytho', 'Programming', 1), ] # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = {} # 'http://docs.python.org/': None} def setup(app): #from sphinx.ext.autodoc import cut_lines #app.connect('autodoc-process-docstring', cut_lines(4, what=['module'])) app.add_description_unit('confval', 'confval', objname='configuration value', indextemplate='pair: %s; configuration value') pytest-2.5.1/doc/ja/img/0000775000175000017500000000000012254002202014337 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/img/pylib.png0000664000175000017500000002012412254002202016163 0ustar hpkhpk00000000000000‰PNG  IHDRšrë1\gAMA¯È7ŠétEXtSoftwareAdobe ImageReadyqÉe<`PLTE ‰Þòïïl³šœ¢bek<¦ìÔ~  ]cR ÆÆÆ•ÊéÍ!"Lµ==*GR¹¡¬±¸×kkïÉÉiºíš "D;Håä冃ÓÖ×>Eþûû™ÿÿÿž´œqzIDATxÚb`cfƒa6IÅ@ ürrBÌrrbr $$ØYˆA[–KV\–ƒOÈgb11€bà“åä’–e‘—““‘“àg’b` ~ fYYY9&!>!9> 91€š)+/+ cæeÁË@ A9aYF¨‘œr ,ÄÀÏÎ''$''.Ä, Ç$ÇÍ"'ÇË@ ü i&19AiFa ARRòòòÄ &“㔇@ ür¬BìF” AV Ù¼pAZ€Z$&Ç Õ+ ä‘  €và åòÄÀÈPA0-++@ lÜàP““\Àpä– 7Øå„äÀÄ ÅTÍ"ËÍ()''%a 9Fy&Y9 êàúYN €„A"’Œ²L ¬ìB@c䨍Nbr²ü@CùÀÆ@ ¬ŒÀÀ–Z²GœIŽ €d@V‚¬cYÉ$ÇÊÄ@ À€>9 iY.yy.Iyy€b`á”b…xEÄŒi^!^DŠÈË(ò¡æ„„#@1-—“€ŠIÈKÅøˆk°ð—æá”áä PX1CÅ„AÖË(EÉ °@c™è((`g`‚²YyY¤Ž]‚]‚EJ† èfA&&&r&^ `*ã•“c 6>vPÈpC˜¤ä…Øee9Á†0 ðƒ@ˆI€]ˆAŽ €€æ11•HHCQVœK–äfVVV +€JÙé– €€ê„@æ Í—+Î'Ät‡Œ8帙äXä> : pØ ƒL…xê) Bq€ªãÚ+ÀÇÀJò²ò03ãh/0‰ˆÍ  :AHb‘æ”ãÊÊÄÂÉ ^^ ^ 7Ø9%¸€Î  :V°*~v>v^ )y¬ €X˜À¾c`ãàB’`ç !Yy°N€bà9H† l1²Œì( €ÚËÀIΨ6Š‚Ó /4Ó(\XÅ„$P$L¢< ³xxxy$ˆ_†W‚T¢ ©ãáa—“<€ZTÆ Î fZ Í àT,îFs31°H*c`bà&}PЀ 8ù ð $';@ 9)°B bˆ™„Ä€u0WKD“xÅ8ehÔïÀf9~&&`þ’f0v9NV9^)IY.€b“€fCnN~q9^fiiYiaipV`faá–òÄÌ‘U,*‰5+8QrËKq^ i„Ø6˜@1ðÌ«Ìb(ÙTzJJ@ó@1€Ã\†IHHˆ— E,Kò€œ@À:Ô  áEÍZ° ÎÒ @À\*ɨbˆdv&AP.“ ˆi ™ ÙXN Z¿I0Éq`Ìj *„&ÊÍÀÇ)ÄÎ@pӀƢLf°Âd‚–pâRÌÌÌr0kX9„¹µ› ÌP1>Np  €ÀUˆŒ00H0KKJJ ó1±2ƒø\œÀ¬Ï-f AvN¨E |¼à“âAH`g `‹Œ‰TxÈJÊB°fKƒJ7)PÅÇð+P,”%€µ7+(Ÿóñ±ƒ 072CL“ebeâ3¸äÄA"òÀ+2—›Pâ vƒœXˆ ˜= Á)t@ÂMœU¬WhÈ) ÒK,$ÇÎ jÀq2Àjm9vqq -B   ™è&>°±ì,2MOæ9v˜iÜÌ`¿‚\-ŽH°[8„Yø@Å‹(A]®oÙÙYdš„€ Ä§R‚,Òàããå¹J$Á >N&V I¼ìȉƒWH‚ïÂÀ`g H št˜”†d".ˆ˜(ÈɬÀ²Qˆ“ ¨—ƒ[Œؤb‚§?&!`<€J[v€B6MŒO”KÍ3I¨¯Aõ&¤d烗­ply€Â  œ,4ä ¼àR\A€¶,’È­"XNàB€$q`Æbàc  i| BPÓ€‰‡•E˜=ÒàÖ8HU·,ƒÈ s‹±2CŽ”,€næc  iìÀZ(.È ,Y98A)ˆØü–ee09¤Ä…À X|ALeMH¶Öj 
KÙYhš8P† %sCJY1>H;—U€šG9XNe'`‰Á ,x€9UB€“——EJŠ…— €@QPbâD2‹bLàÌ(.Æ ,,Rš™K¸¤$89M ˆ”4¬¤ pgŽÙe @ø©XLòñ²pÉ㲌Œ(|€bà@Ä 8jXÁ%¶Ðù,ÌÂÀ…< €€%++Ì£bÐÀÌÇÉ"ŒÃX™ÏÆ#Rà p E02€€‰W —õŒ<ð*Tžü<20‹ˆM°â– 8Yq›&Ç,V!±ÃªE€úTj„ 1†Mã”F÷»¬Æ `Ï\zɰÂR='ŽÆÌ4FD-)' I˜ßœB˜€å+$CòKñ²à1 h„„,<>8ÁƈðH@ëT€‚”!LÐÒPœ›I€“o¢áa&aJò@€”<@Mãå,NglF|¦ñ³ C< °Ð2y…˜ €@¦q3±3!•¡Œäü ƒ€i›‹‡‡Ÿâ,^6`?…Q €€(t”¸¶¹¯Oº€YÒ’dd‘`fd”áä„4ˆÔ©aææfà ¤1D Ý iInnfÄX»¤ ,@1ˆƒkiô¦HB˜£LGâÀšX !4k/n>^i€‚˜jåÀŒã–€6íÙ±5‘€© XV  ‰ ±€ê N.€‚™,!03´<UÞˆŽ+j>Nt€,JqÜ4`æçDnp‹)~N99,Ž9\ßÃR²rrÂ4v>p3 ØD,.Å,Ì,%#³–…X‹Rq¨ˆ$‡sr4@X´q lqW‚ l0Ð$AP'Xÿ2 ÚƒÀ¦%¯°p†5ß À@ ,`ÃAM41>a`óKŠIÜ|vÛÄ…ÅX@Lfù¼PæPûA” Í&HÚçä `*%Ì nrpKIBÂr¼Ò`0)0 €Iâ(­R&DÊ5(¤€z âä `Ü21A0°Æˆ,‡¤¥l+sÊHÃ[0Àe¿ ,ªÄ@‰OØœæ$.€b–ì Ìz‹Þ¶›Ì L à†"7"IpÛ›LÐ:Zd'»È‘œ\n²‚M“d``„6d„Á /Pú7$ঠãFÒZ‚Œ @Ú–,L Ó§2P·‚AêUhÓRŽÔìbDÊwÐB˜€Ø…ÄA!'ÆÌ,2Øýä ib|`ÓDA)Ü̵ÿ¸ ¦‚= lió!L÷}€YØ–> GJM i¬PÓ@)SjšÄ«à9¨Ù&kD3€.Ð_ìLLІ%¨y* 8¹l/$NA‘$5C Ì–Y#Ãi‰K“¨Ý ÊHì ,| ìHnæä  iâ &®<¸¥ nòsˆ1€ƒ_X,,€ ξy‘i1I0»Bˆ†?'@Ü&À‰I)1pK\XOó²ÃÛÂ–Š¨…!À‹\|“´  ÅqrÈ4!9ˆi²Œ\†3#¸H†šÆ1 Ü‚h•â@ä PüS°(…$×CMƒ6Éå!Ý(hC]Ò¤•@­BÂÀæ)ð"z˜ `§€B2 –³äa¥!´…éqBKl`ÞáådgG*ÚYY‹‚œ\„l¨Ê–惕p>—8@Ã Ô E-xIX£ƒ‡m8¹Å4pËn˜8åÊK ëeBk¢ƒŠW°ùÂÀF @1€‹dˆi …Òèv p4H£T(2 ¨U…8Ôxf`«’— €ÄYànæ;hµ %ÌÌÌé/ Ô À„'RK A²?¨½Ï$À@@Ÿ# lš$°äưÌgŠBº\¨¦ !õŒ€.V`Ög5‡€ú8ùx¹hš ŸÄ4>P¹*ó9A`—1Ê¡¸ƒðüÏ ê[ wvvpWXLqÄ4)`Nfafv^Pκœ ˜^EA>·Ä¤¤ Ř+¬G* lVÅd€‰‡ƒäN.€bæ01„Àj9 =pÞ–6ûù 1-ìÿÀ­v`׉  K˜9å$¸h°÷ÖO„4¬9À£’BÀz”ð¡£oLœLÀÖ°Y ôxH‹‰صvh¤$x¹èS&`Kš 9Þ!c"¬ & êÛJ°ƒ[ŠRÀF#¸£$j‰€ª@A9v`%Ì, Lš’ÂÒ4M4ú"„’ y!Ý20‡ܽZ.îfI ƒºYÒÌ, Æ(¨¿jÁÁZw4M­ÓÆÁ îg1°ƒÛ\À¶P7°ƒÉÈ%ŒÔS’•– U…h+(gI d~1P$=,Ïy%¤ñõÚPÛ¡©³â˜ß€NÖñ _€¼AJ· €@¦±#ÜÆ ,ö€™XØ ä”bæ’d$¥ (@ÀH\Þ‹¦oи ¨C ìOb7 !ÊÃ/ »ƒ€5*XaQ Lw¤ÂÊË+%‰«QÎŒè[2ƒ»Jˆþ"@³>R4쬼=J\^äç÷NÄ9™Á!¸ÅÄ ÎÏR ìžò1ñáî·ñ#zmÒ`+eyàƒ¹ò,-‘zÎÀJ€Yܦñ2¢w¦á^ pMMµðŠˆWw? 
Ñ„›h 1x®#ª·ËÏ#!7 €àÍN9¸ñ9 Y/¼ 7 €À±nw²å4 iRcD†@ €$@IT:‹!œÆ,O”i ®,(‰pÂR@KK`‹A\þÁºõø:¨<È©Üû”DŒ9$Ÿ‚úTâ`‘`Æ—ÑyàSÀ8¯$¤Ì¶ p è8èX°ôæ•Â[j 0€=q)˜s¾Š¤åŒ PM'Ä ,½9¹ðP÷€Ÿ‡Wjš8¸;.)@Ó@ÃË ~°_(Ä)LÀ4Fh„òÀâRÜÏe €›& ̪à1P÷‰¯i²Ä%v ,¥ðð²°ñË€™Æ ,µ»”<ÞÁyYð@/Ô4ðhˆ„<8Jh?«;RË¿i"0bç•`šÉ&$Øyx„¤øAS%ò4 ØßV[2BÐÞ0'^Ó@FÊ.Yˆ£@lȈH@1pÛ6œœ¼àá>PØñ ã\VìÌhe ‡0KË3ƒtÓ@1³ )|cRò\@íðôjEƒNX¢ €D8ø¡@†…IXÀù· AEÀæ— D5—47·˜˜  ÐÅ22XT²{\ ËE¸@IUX €ø‘úØÀþ¨@‘@³á@™›öô! ¤Ä íg|@ØÈa’âÖ­à§œ=Bì| *4¨ ŒH.€BqTÕq2À– Š X@v\Æ@Øe·†òù@]9üêÅÁî¶mÙN t§AÌõÐ\Æ‹.¬&™A­Â.ƒLo€ú:B„óƒfø Þ9 €0‹X‡€mh XL3ÈH±òñb #!,.`‚¦q~fan`‚d‘ÁÔÊZ¨oAN d§ Á€0XÙÁvc‰M4”Ë „q€­äƒAvä$,LEÀd$Š/ µXYYÄ‘}!ÎŒÜ)9 €Ýi Dždàã5ÀASœÌRRR`sÅKX˜‰sp1#i€PI„l± +8V}(p°{|àîB jÒ9 €€îg‡Ì´Û¤°Ñ-`K KÂZÚ²Œ’œ@Ó ä¥^æ0y€&d ¢ð—cGÍÔà䆔 €}9 ã`#2 , …ÐiZð kPsj‡¤0ÜÐÁ3a }âÂp²ÀØc—,0µòlØÓ77¸fe”G- @n–q ñOµÀY¸ˆŸCÔ|LHN“…ÍH\.߸nf&PiÀ X–ÅšAÖ Ê À(B9pA’ä5³y¥ hÜdá PÉ jVd§É3CW’Hn ‡pH#œ íõJÁËÊ„¥Ü•U| n:;°ƒîD ŠP¸:^ / ÁFÀÃ, ´@ ,à‚P\ Ùiò¢ìÒH)y1‚¢‘®€ <1/ ‹N1kQp`†¤ d ‹8¢ß) N Xá!ÅŠ¡ W€†JA=_$·G‘¤$Qœ*s«<6·±°¢Œó#d.(šA9@ˆ“šÖÅÀ%Ÿ;dt ä4€‚: ˜Aø$Nƒ¤p—BD!8•³3K"øB @“‡:œªá“uHN óƒK=vp而—€úXoƒ8a³Ï§ÔiÀ@@rš¤84ƒóñ¡;C–Måe98€y@Îü%ƒä8q)HÖ­’äƒvàäÏŠLD Ãj`šg÷ §ÄiÀ´ÊÇ+ƒ”ÖD`:dœ&Èb„Ò‚RP6È٠Ȁä8H1ÏÏÅ .ø ™2® ­ AUØ…è H u/@§ØiÀtLª2È…0²8¤˜Y$à©Ë õ. ¢Œž'%¡.…'j>P6dÖ\ lna«Ø! @éYµºW¤ §,prÊ¡8 XêB‡qàiHR‡Œ4jy K ôŠ p!dòr²ãno€e@Á/rø¥@• ¨2§5€BªÞQ) Zû(Ž9¡ @ 2¼à•hõ€ ¼ÿº%1æªIMòŒ §q¢OÊ2KðÂC l³¯ìœRÒ”%5QX„Æ $$M YQx­Ê‡8Äi l¼ ‰N&,Nãä%Ûiˆ4q¸!Ê.bQÄ! NH™]ª@ §‰CÇÐ@CF¬àµg ¡WXë(²#êp«]Ô‘€–kÂ2ÀìÅî<ƒ;>\âPgE`"@1HÉ ú„Hý\8ƒüøå€Z ›QšK˜Ô׃.1d÷N‘ÑÄ +ÉEß›v‰¥%‰îß ª0¸'éwõ×jÂIEND®B`‚pytest-2.5.1/doc/ja/changelog.txt0000664000175000017500000000020512254002202016250 0ustar hpkhpk00000000000000 .. _changelog: 変更履歴 ======== .. Changelog history ================================= .. include:: ../../CHANGELOG pytest-2.5.1/doc/ja/Makefile0000664000175000017500000001271412254002202015230 0ustar hpkhpk00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest regen: PYTHONDONTWRITEBYTECODE=1 COLUMNS=76 regendoc --update *.txt */*.txt help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* install: html rsync -avz _build/html/ pytest.org:/www/pytest.org/latest-ja installpdf: latexpdf @scp $(BUILDDIR)/latex/pytest.pdf pytest.org:/www/pytest.org/latest installall: clean install installpdf @echo "done" html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pytest.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pytest.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/pytest" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pytest" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. 
The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." texinfo: mkdir -p $(BUILDDIR)/texinfo $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: mkdir -p $(BUILDDIR)/texinfo $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." pytest-2.5.1/doc/ja/projects.txt0000664000175000017500000001721612254002202016164 0ustar hpkhpk00000000000000.. _projects: プロジェクトã®ä¾‹ ================ .. Project examples ========================== .. Here are some examples of projects using py.test (please send notes via :ref:`contact`): py.test を使ã£ã¦ã„るプロジェクトを紹介ã—ã¾ã™ (:ref:`contact` を通ã—ã¦ãƒ¡ãƒ¢ã‚’é€ã£ã¦ãã ã•ã„): .. 
* `PyPy `_, Python with a JIT compiler, running over `16000 tests `_ * the `MoinMoin `_ Wiki Engine * `tox `_, virtualenv/Hudson integration tool * `PIDA `_ framework for integrated development * `PyPM `_ ActiveState's package manager * `Fom `_ a fluid object mapper for FluidDB * `applib `_ cross-platform utilities * `six `_ Python 2 and 3 compatibility utilities * `pediapress `_ MediaWiki articles * `mwlib `_ mediawiki parser and utility library * `The Translate Toolkit `_ for localization and conversion * `execnet `_ rapid multi-Python deployment * `pylib `_ cross-platform path, IO, dynamic code library * `Pacha `_ configuration management in five minutes * `bbfreeze `_ create standalone executables from Python scripts * `pdb++ `_ a fancier version of PDB * `py-s3fuse `_ Amazon S3 FUSE based filesystem * `waskr `_ WSGI Stats Middleware * `guachi `_ global persistent configs for Python modules * `Circuits `_ lightweight Event Driven Framework * `pygtk-helpers `_ easy interaction with PyGTK * `QuantumCore `_ statusmessage and repoze openid plugin * `pydataportability `_ libraries for managing the open web * `XIST `_ extensible HTML/XML generator * `tiddlyweb `_ optionally headless, extensible RESTful datastore * `fancycompleter `_ for colorful tab-completion * `Paludis `_ tools for Gentoo Paludis package manager * `Gerald `_ schema comparison tool * `abjad `_ Python API for Formalized Score control * `bu `_ a microscopic build system * `katcp `_ Telescope communication protocol over Twisted * `kss plugin timer `_ * `PyPy `_: JIT コンパイラーを備ãˆãŸ Python〠`16000 テスト `_ 以上を実行 * `MoinMoin `_: Wiki エンジン * `tox `_: virtualenv/Jenkins インテグレーションツール * `PIDA `_: çµ±åˆé–‹ç™ºãƒ•レームワーク * `PyPM `_: Activestate 社ã®ãƒ‘ッケージマãƒãƒ¼ã‚¸ãƒ£ãƒ¼ * `Fom `_: FluidDB ã® fluid オブジェクトマッパー * `applib `_: クロスプラットフォームユーティリティ * `six `_: Python 2 㨠3 ã®äº’æ›ãƒ¦ãƒ¼ãƒ†ã‚£ãƒªãƒ†ã‚£ * `pediapress `_: MediaWiki ã®è¨˜äº‹ * `mwlib `_: mediawiki ã®ãƒ‘ーサーã¨ãƒ¦ãƒ¼ãƒ†ã‚£ãƒªãƒ†ã‚£ãƒ©ã‚¤ãƒ–ラリ * `The Translate Toolkit 
`_: ローカライズã¨å¤‰æ› * `execnet `_: 高速㪠multi-Python デプロイ * `pylib `_: クロスプラットフォームã®ãƒ‘スã€IOã€å‹•的コードライブラリ * `Pacha `_: 5分ã§ã§ãã‚‹æ§‹æˆç®¡ç† * `bbfreeze `_: Python スクリプトã‹ã‚‰å˜ç‹¬ã§å®Ÿè¡Œã§ãる実行å¯èƒ½ãƒ•ァイルã®ä½œæˆ * `pdb++ `_: PDB ã®æ‰‹ã®è¾¼ã‚“ã ãƒãƒ¼ã‚¸ãƒ§ãƒ³ * `py-s3fuse `_: Amazon S3 FUSE ベースã®ãƒ•ァイルシステム * `waskr `_: WSGI 統計ミドルウェア * `guachi `_: Python モジュールã®ã‚°ãƒ­ãƒ¼ãƒãƒ«ãªæ°¸ç¶šçš„設定 * `Circuits `_: 軽é‡ã®ã‚¤ãƒ™ãƒ³ãƒˆé§†å‹•型フレームワーク * `pygtk-helpers `_: PyGTK ã«ã‚ˆã‚‹ç°¡å˜æ“作 * `QuantumCore `_: statusmessage 㨠repoze ã® openid プラグイン * `pydataportability `_: open web を管ç†ã™ã‚‹ãŸã‚ã®ãƒ©ã‚¤ãƒ–ラリ * `XIST `_: æ‹¡å¼µå¯èƒ½ãª HTML/XML ジェãƒãƒ¬ãƒ¼ã‚¿ãƒ¼ * `tiddlyweb `_: ãƒ˜ãƒƒãƒ‰ãƒ¬ã‚¹ã‚’é¸æŠžã§ãã‚‹æ‹¡å¼µå¯èƒ½ãª RESTful データストア * `fancycompleter `_: カラフルãªã‚¿ãƒ–補完 * `Paludis `_: Gentoo Paludis パッケージマãƒãƒ¼ã‚¸ãƒ£ãƒ¼ãƒ„ール * `Gerald `_: スキーマ比較ツール * `abjad `_: Formalized Score 制御ã®ãŸã‚ã® Python API * `bu `_: å¾®å°ãƒ“ルドシステム * `katcp `_: Twisted 上㮠Telescope 通信プロトコル * `kss plugin timer `_ .. Some organisations using py.test ----------------------------------- py.test を使ã£ã¦ã„る組織 ------------------------ .. * `Square Kilometre Array, Cape Town `_ * `Some Mozilla QA people `_ use pytest to distribute their Selenium tests * `Tandberg `_ * `Shootq `_ * `Stups department of Heinrich Heine University Duesseldorf `_ * `cellzome `_ * `Open End, Gothenborg `_ * `Laboraratory of Bioinformatics, Warsaw `_ * `merlinux, Germany `_ * many more ... (please be so kind to send a note via :ref:`contact`) * `Square Kilometre Array, Cape Town `_ * `Some Mozilla QA people `_: Selenium テストをé…布ã™ã‚‹ãŸã‚ã« pytest を使用 * `Tandberg `_ * `Shootq `_ * `Stups department of Heinrich Heine University Duesseldorf `_ * `cellzome `_ * `Open End, Gothenborg `_ * `Laboraratory of Bioinformatics, Warsaw `_ * `merlinux, Germany `_ * ã•らã«ãŸãã•ã‚“ ... (:ref:`contact` を通ã—ã¦ãƒ¡ãƒ¢ã‚’é€ã£ã¦ãã ã•ã„) pytest-2.5.1/doc/ja/faq.txt0000664000175000017500000003362112254002202015100 0ustar hpkhpk00000000000000課題ã¨è³ªå• ========== .. 
Some Issues and Questions ================================== .. If you don't find an answer here, checkout the :ref:`contact channels` to get help. .. note:: ã“ã“ã§ç­”ãˆãŒè¦‹ã¤ã‹ã‚‰ãªã„å ´åˆã¯ :ref:`contact channels` ã‹ã‚‰åŠ©ã‘を求ã‚ã¦ãã ã•ã„。 .. On naming, nosetests, licensing and magic ------------------------------------------------ åå‰ä»˜ã‘ã€nosetestsã€ãƒ©ã‚¤ã‚»ãƒ³ã‚¹ã¨é­”法 ------------------------------------- .. Why a ``py.test`` instead of a ``pytest`` command? ++++++++++++++++++++++++++++++++++++++++++++++++++ ã©ã†ã—ã¦ã‚³ãƒžãƒ³ãƒ‰å㯠``pytest`` ã§ã¯ãªã ``py.test`` ãªã®ï¼Ÿ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. Some of the reasons are historic, others are practical. ``py.test`` used to be part of the ``py`` package which provided several developer utilities, all starting with ``py.``, thus providing nice TAB-completion. If you install ``pip install pycmd`` you get these tools from a separate package. These days the command line tool could be called ``pytest`` but since many people have gotten used to the old name and there is another tool named "pytest" we just decided to stick with ``py.test``. ç†ç”±ã®ä¸€éƒ¨ã¯æ­´å²çš„ãªã‚‚ã®ã§ã€ãれ以外ã¯å®Ÿç”¨ä¸Šã®ã‚‚ã®ã§ã™ã€‚ ``py.test`` ã¯ã€è¤‡æ•°ã®é–‹ç™ºè€…å‘ã‘ユーティリティをæä¾›ã™ã‚‹ ``py`` パッケージã®ä¸€éƒ¨ã¨ã—ã¦ä½¿ã‚れã¦ã„ã¾ã—ãŸã€‚ãれã¯å…¨ã¦ ``py.`` ã§å§‹ã¾ã‚Šã€ã“ã®ã‚ˆã†ã« を補完ã™ã‚‹å„ªã‚ŒãŸæ©Ÿèƒ½ã‚’æä¾›ã—ã¦ã„ã¾ã™ã€‚ ``pip install pycmd`` ã§ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã—ãŸã‚‰ã€åˆ¥ã€…ã®ãƒ‘ッケージã‹ã‚‰ãã†ã„ã£ãŸãƒ„ールを確èªã§ãã¾ã™ã€‚最近ã«ãªã£ã¦ã€ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ãƒ„ール㯠``pytest`` ã¨å‘¼ã‚“ã§ã„ã¾ã™ãŒã€æ˜”ã‹ã‚‰ã®å¤šãã®äººãŸã¡ãŒå¤ã„åå‰ã«ãªã˜ã‚“ã§ã„㦠"pytest" ã¨ã„ã†åå‰ã¯åˆ¥ãƒ„ãƒ¼ãƒ«ã«æ€ãˆã¾ã™ã€‚ãã®ãŸã‚ã€æˆ‘々㯠``py.test`` ã¨ã„ã†åå‰ã‚’使ã„ç¶šã‘ã‚‹ã“ã¨ã«æ±ºã‚ã¾ã—ãŸã€‚ .. How does py.test relate to nose and unittest? +++++++++++++++++++++++++++++++++++++++++++++++++ py.test 㯠nose ã‚„ unittest ã¨ã©ã‚“ãªé–¢ä¿‚ãŒã‚ã‚‹ã®ï¼Ÿ ++++++++++++++++++++++++++++++++++++++++++++++++++ .. 
py.test and nose_ share basic philosophy when it comes to running and writing Python tests. In fact, you can run many tests written for nose with py.test. nose_ was originally created as a clone of ``py.test`` when py.test was in the ``0.8`` release cycle. Note that starting with pytest-2.0 support for running unittest test suites is majorly improved and you should be able to run many Django and Twisted test suites without modification. py.test 㨠nose_ ã¯ã€Python テストを書ã„ã¦å®Ÿè¡Œã™ã‚‹ã®ã«åŒã˜åŸºæœ¬ç†å¿µã‚’ã‚‚ã£ã¦ã„ã¾ã™ã€‚ nose_ ã¯ã€ã‚‚ã¨ã‚‚㨠``py.test`` ㌠``0.8`` リリースã®ã¨ãã« py.test ã®ã‚¯ãƒ­ãƒ¼ãƒ³ã¨ã—ã¦ä½œæˆã•れã¾ã—ãŸã€‚pytest 2.0 㯠unittest ã®ãƒ†ã‚¹ãƒˆã‚¹ã‚¤ãƒ¼ãƒˆã‚’実行ã§ãるよã†ã«ãªã£ãŸã®ãŒä¸»ãªæ”¹å–„点ã§ã‚ã‚‹ã“ã¨ã«æ³¨ç›®ã—ã¦ãã ã•ã„。ãã—ã¦ã€å¤šãã® Django ã‚„ Twisted ã®ãƒ†ã‚¹ãƒˆã‚¹ã‚¤ãƒ¼ãƒˆã‚’変更ã›ãšã«å®Ÿè¡Œã§ãã¾ã™ã€‚ .. _features: test/features.html py.test ã® "魔法" ã¯ä¸€ä½“何ãªã®ï¼Ÿ ++++++++++++++++++++++++++++++++ .. What's this "magic" with py.test? ++++++++++++++++++++++++++++++++++++++++++ .. Around 2007 (version ``0.8``) some people claimed that py.test was using too much "magic". Partly this has been fixed by removing unused, deprecated or complicated code. It is today probably one of the smallest, most universally runnable and most customizable testing frameworks for Python. However, ``py.test`` still uses many metaprogramming techniques and reading its source is thus likely not something for Python beginners. 2007å¹´é ƒ (ãƒãƒ¼ã‚¸ãƒ§ãƒ³ ``0.8``)ã€py.test ã¯ã‚ã¾ã‚Šã«ã‚‚多ãã® "魔法" を使ã£ã¦ã„ã‚‹ã¨ä¸»å¼µã™ã‚‹äººãŸã¡ãŒã„ã¾ã—ãŸã€‚未使用ãªã‚³ãƒ¼ãƒ‰ã€éžæŽ¨å¥¨ã€è¤‡é›‘ãªã‚³ãƒ¼ãƒ‰ã‚’削除ã™ã‚‹ã“ã¨ã§éƒ¨åˆ†çš„ã«ã¯è§£æ¶ˆã•れã¾ã—ãŸã€‚今日ã§ã¯ã€py.test ã¯ç¢ºã‹ã« Python å‘ã‘ã®æœ€ã‚‚å°ã•ãæ™®éçš„ã§ã‚«ã‚¹ã‚¿ãƒžã‚¤ã‚ºå¯èƒ½ãªãƒ†ã‚¹ãƒˆãƒ•レームワークã®1ã¤ã§ã™ã€‚但㗠``py.test`` ã¯ã€ã¾ã å¤šãã®ãƒ¡ã‚¿ãƒ—ログラミングテクニックを使ã£ã¦ã„ã¦ã€Python åˆå¿ƒè€…ãŒãã®ã‚½ãƒ¼ã‚¹ã‚’読ã‚ã‚‹ã‚‚ã®ã§ã¯ã‚りã¾ã›ã‚“。 .. A second "magic" issue is arguably the assert statement debugging feature. 
When loading test modules py.test rewrites the source code of assert statements. When a rewritten assert statement fails, its error message has more information than the original. py.test also has a second assert debugging technique. When an ``assert`` statement that was missed by the rewriter fails, py.test re-interprets the expression to show intermediate values if a test fails. This second technique suffers from a caveat that the rewriting does not: If your expression has side effects (better to avoid them anyway!) the intermediate values may not be the same, confusing the reinterpreter and obfuscating the initial error (this is also explained at the command line if it happens). You can turn off all assertion debugging with ``py.test --assertmode=off``. 2番目㮠"魔法" ã®èª²é¡Œã¯ã€é–“é•ã„ãªã assert æ–‡ã®ãƒ‡ãƒãƒƒã‚°æ©Ÿèƒ½ã§ã™ã€‚テストモジュールãŒèª­ã¿è¾¼ã¾ã‚Œã‚‹ã¨ã€py.test 㯠assert æ–‡ã®ã‚½ãƒ¼ã‚¹ã‚³ãƒ¼ãƒ‰ã‚’æ›¸ãæ›ãˆã¾ã™ã€‚æ›¸ãæ›ãˆã‚‰ã‚ŒãŸ assert æ–‡ãŒå¤±æ•—ã—ãŸã¨ãã€ãã®ã‚¨ãƒ©ãƒ¼ãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ã¯ã€ã‚ªãƒªã‚¸ãƒŠãƒ«ã® assert 文より分ã‹ã‚Šã‚„ã™ã„ã‚‚ã®ã§ã™ã€‚py.test ã«ã‚‚別ã®ãƒ‡ãƒãƒƒã‚°æ‰‹æ³•ãŒã‚りã¾ã™ã€‚æ›¸ãæ›ãˆãŒå¤±æ•—ã™ã‚‹ã“ã¨ã«ã‚ˆã‚Š ``assert`` æ–‡ãŒå¤±æ•—ã—ãŸã¨ãã€py.test ã¯ãƒ†ã‚¹ãƒˆãŒå¤±æ•—ã—ãŸã¨ãã«ä¸­é–“値を表示ã™ã‚‹ãŸã‚ã«ãã®å¼ã‚’å†è§£é‡ˆã—ã¾ã™ã€‚ã“ã®åˆ¥ã®ãƒ‡ãƒãƒƒã‚°æ‰‹æ³•ã¯æ›¸ãæ›ãˆãŒè¡Œã‚れãªã‹ã£ãŸã¨ã„ã†è­¦å‘Šã§æ‚©ã¾ã•れã¾ã™ã€‚ãã®å¼ãŒå‰¯ä½œç”¨ (ã¨ã«ã‹ã触らãªã„ã®ãŒè‰¯ã„ï¼) ã‚’ã‚‚ã¤ãªã‚‰ã€ä¸­é–“値ã¯åŒã˜ã«ãªã‚‰ãªã„å¯èƒ½æ€§ãŒã‚りã¾ã™ã€‚ãれã¯å†è§£é‡ˆã™ã‚‹ã‚¤ãƒ³ã‚¿ãƒ¼ãƒ—リターを混乱ã•ã›ã€åˆæœŸã®ã‚¨ãƒ©ãƒ¼ã‚’分ã‹ã‚Šé›£ãã—ã¾ã™ (ã“れも発生ã—ãŸã‚‰ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã§è¡¨ç¤ºã•れる) 。 ``py.test --assertmode=off`` ã«ã‚ˆã‚Šã€å…¨ã¦ã®ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ãƒ‡ãƒãƒƒã‚°ã‚’無効ã«ã§ãã¾ã™ã€‚ .. _`py namespaces`: index.html .. _`py/__init__.py`: http://bitbucket.org/hpk42/py-trunk/src/trunk/py/__init__.py 関数ã®å¼•æ•°ã€ãƒ‘ラメーターテストã¨ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ— -------------------------------------------- .. Function arguments, parametrized tests and setup ------------------------------------------------------- .. 
_funcargs: test/funcargs.html funcarg- 対 xUnit セットアップスタイルã®ç–‘å• ++++++++++++++++++++++++++++++++++++++++++++ .. Is using funcarg- versus xUnit setup a style question? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. For simple applications and for people experienced with nose_ or unittest-style test setup using `xUnit style setup`_ probably feels natural. For larger test suites, parametrized testing or setup of complex test resources using funcargs_ may feel more natural. Moreover, funcargs are ideal for writing advanced test support code (like e.g. the monkeypatch_, the tmpdir_ or capture_ funcargs) because the support code can register setup/teardown functions in a managed class/module/function scope. シンプルãªã‚¢ãƒ—リケーションå‘ã‘ã‚„ã€nose_ ã‹ unittest スタイルã®çµŒé¨“ãŒã‚る人ãŸã¡ã«ã¨ã£ã¦ã¯ã€ãŠãらã :ref:`xunitsetup` ã‚’ä½¿ã†æ–¹ãŒè‡ªç„¶ã«æ„Ÿã˜ã‚‹ã¯ãšã§ã™ã€‚ã—ã‹ã—ã€å·¨å¤§ãªãƒ†ã‚¹ãƒˆã‚¹ã‚¤ãƒ¼ãƒˆå‘ã‘ã§ã¯ã€ãƒ‘ラメーターテストや funcargs_ を使ã£ãŸè¤‡é›‘ãªãƒ†ã‚¹ãƒˆãƒªã‚½ãƒ¼ã‚¹ã®ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—ã®æ–¹ãŒã‚‚ã£ã¨è‡ªç„¶ã«æ„Ÿã˜ã‚‹ã‹ã‚‚ã—れã¾ã›ã‚“。ã•らã«è¨€ã†ã¨ã€funcargs ã¯é«˜åº¦ãªãƒ†ã‚¹ãƒˆã‚µãƒãƒ¼ãƒˆã‚³ãƒ¼ãƒ‰ (例ãˆã° monkeypatch_, tmpdir_, capture_, funcargs) を書ãã®ã«æœ€é©ã§ã™ã€‚ã¨ã„ã†ã®ã¯ã€ãã®ã‚µãƒãƒ¼ãƒˆã‚³ãƒ¼ãƒ‰ã¯ class/module/function スコープを管ç†ã™ã‚‹ setup/teardown 関数を登録ã§ãã‚‹ã‹ã‚‰ã§ã™ã€‚ .. _monkeypatch: test/plugin/monkeypatch.html .. _tmpdir: test/plugin/tmpdir.html .. _capture: test/plugin/capture.html .. _`why pytest_pyfuncarg__ methods?`: ã©ã†ã—㦠funcarg ファクトリーã®åå‰ã¯ ``pytest_funcarg__*`` ãªã®ï¼Ÿ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. Why the ``pytest_funcarg__*`` name for funcarg factories? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. We like `Convention over Configuration`_ and didn't see much point in allowing a more flexible or abstract mechanism. Moreover, it is nice to be able to search for ``pytest_funcarg__MYARG`` in source code and safely find all factory functions for the ``MYARG`` function argument. 
我々㯠`設定よりè¦ç´„`_ を好ã¿ã€ã‚ˆã‚ŠæŸ”è»Ÿã«æŠ½è±¡çš„ãªä»•組ã¿ã‚’許容ã™ã‚‹ã®ã«æ„味ãŒã‚ã‚‹ã¨ã¯æ€ã„ã¾ã›ã‚“ã§ã—ãŸã€‚ã•らã«ã€ã‚½ãƒ¼ã‚¹ã‚³ãƒ¼ãƒ‰å†…ã§ ``pytest_funcarg__MYARG`` を検索ã§ãã‚‹ã®ã¯ä¾¿åˆ©ã§ã€ ``MYARG`` ã¨ã„ã†é–¢æ•°ã®å¼•æ•°ã«å¯¾ã™ã‚‹å…¨ã¦ã®ãƒ•ァクトリー関数を戸惑ã„ãªã探ã›ã¾ã™ã€‚ .. _`Convention over Configuration`: http://en.wikipedia.org/wiki/Convention_over_Configuration .. _`設定よりè¦ç´„`: http://en.wikipedia.org/wiki/Convention_over_Configuration funcarg ファクトリー関数ã‹ã‚‰è¤‡æ•°ã®å€¤ã‚’ yield ã§ãã¾ã™ã‹ï¼Ÿ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. Can I yield multiple values from a fixture function function? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ .. There are two conceptual reasons why yielding from a factory function is not possible: ファクトリー関数㌠yield ã§ããªã„概念上ã®ç†ç”±ãŒ2ã¤ã‚りã¾ã™: .. * Calling factories for obtaining test function arguments is part of setting up and running a test. At that point it is not possible to add new test calls to the test collection anymore. * テスト関数ã®å¼•æ•°ã‚’å–å¾—ã™ã‚‹ãŸã‚ã«ãƒ•ァクトリー関数を呼ã³å‡ºã™ã®ã¯ã€ãƒ†ã‚¹ãƒˆã®è¨­å®šã¨å®Ÿè¡Œã®éƒ¨åˆ†ã§ã™ã€‚ãã®æ™‚点ã§ã¯ã€ãƒ†ã‚¹ãƒˆã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³ã®ãŸã‚ã«æ–°ãŸãªãƒ†ã‚¹ãƒˆå‘¼ã³å‡ºã—を追加ã§ãã¾ã›ã‚“。 .. * If multiple factories yielded values there would be no natural place to determine the combination policy - in real-world examples some combinations often should not run. * 複数ã®ãƒ•ァクトリー関数ãŒå€¤ã‚’ yield ã—ãŸå ´åˆã€çµ„ã¿åˆã‚ã›æ–¹æ³•を決定ã™ã‚‹ã®ã«é©å½“ãªå ´æ‰€ãŒã‚りã¾ã›ã‚“。ç¾å®Ÿã®ä¸–界ã®ä¾‹ã¯ã€ãã†ã„ã£ãŸçµ„ã¿åˆã‚ã›ãŒå¤šãã®å ´åˆã«å®Ÿè¡Œã•れã¾ã›ã‚“。 .. Use the `pytest_generate_tests`_ hook to solve both issues and implement the `parametrization scheme of your choice`_. 両方ã®èª²é¡Œã‚’解決ã™ã‚‹ãŸã‚ã« `pytest_generate_tests`_ フックを使ã„〠`パラメーター化ã®ä»•組ã¿ã«ã‚ã£ãŸã‚‚ã®ã‚’é¸æŠžã—ã¦`_ 実装ã—ã¦ãã ã•ã„。 .. _`pytest_generate_tests`: test/funcargs.html#parametrizing-tests .. _`parametrization scheme of your choice`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/ .. 
_`パラメーター化ã®ä»•組ã¿ã«ã‚ã£ãŸã‚‚ã®ã‚’é¸æŠžã—ã¦`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/ ãã®ä»–ã®ãƒ‘ッケージ㨠py.test ã®ç›¸äº’é€£æº --------------------------------------- .. py.test interaction with other packages --------------------------------------------------- .. Issues with py.test, multiprocess and setuptools? +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ py.test, multiprocess, setuptools ã¨é–¢é€£ã™ã‚‹å•題? ++++++++++++++++++++++++++++++++++++++++++++++++++ .. On windows the multiprocess package will instantiate sub processes by pickling and thus implicitly re-import a lot of local modules. Unfortunately, setuptools-0.6.11 does not ``if __name__=='__main__'`` protect its generated command line script. This leads to infinite recursion when running a test that instantiates Processes. Windows 上㮠multiprocess パッケージã¯ã€pickle 化ã™ã‚‹ã“ã¨ã§ã‚µãƒ–プロセスをインスタンス化ã—ã€æš—黙的ã«ãŸãã•ã‚“ã®ãƒ­ãƒ¼ã‚«ãƒ«ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’å†ã‚¤ãƒ³ãƒãƒ¼ãƒˆã—ã¾ã™ã€‚残念ãªãŒã‚‰ã€setuptools 0.6.11 ãŒä½œæˆã—ãŸã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚¹ã‚¯ãƒªãƒ—ト㯠``if __name__=='__main__'`` ã«ã‚ˆã‚‹ä¿è­·ãŒã‚りã¾ã›ã‚“。ã“れã«ã‚ˆã‚Šã€å®Ÿè¡Œä¸­ã®ãƒ†ã‚¹ãƒˆãŒãƒ—ロセスをインスタンス化ã™ã‚‹ã¨ãã«ç„¡é™å†å¸°ã‚’引ãèµ·ã“ã—ã¾ã™ã€‚ .. A good solution is to `install Distribute`_ as a drop-in replacement for setuptools and then re-install ``pytest``. Otherwise you could fix the script that is created by setuptools by inserting an ``if __name__ == '__main__'``. Or you can create a "pytest.py" script with this content and invoke that with the python version:: 良ã„解決策ã¯ã€setuptools ã®ç½®ãæ›ãˆã¨ã—㦠`distribute をインストールã™ã‚‹`_ ã“ã¨ã§ã™ã€‚ãã®å¾Œã« ``pytest`` ã‚’å†ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã—ã¾ã™ã€‚åˆ¥ã®æ–¹æ³•ã§ã¯ã€setuptools ãŒä½œæˆã—ãŸã‚¹ã‚¯ãƒªãƒ—ト㫠``if __name__ == '__main__'`` を追加ã—ã¦ä¿®æ­£ã—ã¾ã™ã€‚ã‚‚ã—ãã¯ã€ã“ã®å†…容をå«ã‚€ "pytest.py" スクリプトを作æˆã—ã¦ã€ãã®ã‚¹ã‚¯ãƒªãƒ—トを実行ã—ã¾ã™:: import pytest if __name__ == '__main__': pytest.main() .. _`install distribute`: http://pypi.python.org/pypi/distribute#installation-instructions .. 
_`distribute をインストールã™ã‚‹`: http://pypi.python.org/pypi/distribute#installation-instructions .. include:: links.inc pytest-2.5.1/doc/ja/contents.txt0000664000175000017500000000120012254002202016152 0ustar hpkhpk00000000000000 .. _toc: pytest ドキュメント =================== .. Full pytest documentation =========================== .. `Download latest version as PDF `_ `PDF ã§æœ€æ–°ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’ダウンロードã™ã‚‹ `_ .. `Download latest version as EPUB `_ .. `PDF ã§æœ€æ–°ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’ダウンロードã™ã‚‹ `_ .. toctree:: :maxdepth: 2 overview example/index apiref plugins talks develop announce/index .. toctree:: :hidden: changelog.txt pytest-2.5.1/doc/ja/links.inc0000664000175000017500000000154112254002202015377 0ustar hpkhpk00000000000000 .. _`skipping plugin`: plugin/skipping.html .. _`funcargs mechanism`: funcargs.html .. _`doctest.py`: http://docs.python.org/library/doctest.html .. _`xUnit style setup`: xunit_setup.html .. _`pytest_nose`: plugin/nose.html .. _`reStructured Text`: http://docutils.sourceforge.net .. _`Python debugger`: http://docs.python.org/lib/module-pdb.html .. _nose: http://somethingaboutorange.com/mrl/projects/nose/ .. _pytest: http://pypi.python.org/pypi/pytest .. _mercurial: http://mercurial.selenic.com/wiki/ .. _`setuptools`: http://pypi.python.org/pypi/setuptools .. _`easy_install`: .. _`distribute docs`: .. _`distribute`: http://pypi.python.org/pypi/distribute .. _`pip`: http://pypi.python.org/pypi/pip .. _`virtualenv`: http://pypi.python.org/pypi/virtualenv .. _hudson: http://hudson-ci.org/ .. _jenkins: http://jenkins-ci.org/ .. _tox: http://testrun.org/tox pytest-2.5.1/doc/ja/monkeypatch.txt0000664000175000017500000000733512254002202016656 0ustar hpkhpk00000000000000 .. Monkeypatching/mocking modules and environments ================================================================ モンキーパッãƒï¼ãƒ¢ãƒƒã‚¯ã®ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã¨ç’°å¢ƒ ======================================== .. currentmodule:: _pytest.monkeypatch .. 
Sometimes tests need to invoke functionality which depends on global settings or which invokes code which cannot be easily tested such as network access. The ``monkeypatch`` function argument helps you to safely set/delete an attribute, dictionary item or environment variable or to modify ``sys.path`` for importing. See the `monkeypatch blog post`_ for some introduction material and a discussion of its motivation. 時々ã€ã‚°ãƒ­ãƒ¼ãƒãƒ«è¨­å®šã«ä¾å­˜ã™ã‚‹æ©Ÿèƒ½ã®ãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹ã€ã¾ãŸã¯ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã‚¢ã‚¯ã‚»ã‚¹ã‚’ä¼´ã†ã‚ˆã†ãªç°¡å˜ã«ãƒ†ã‚¹ãƒˆã§ããªã„コードを実行ã™ã‚‹å¿…è¦ãŒã‚りã¾ã™ã€‚ ``monkeypatch`` ã¨ã„ã†é–¢æ•°ã®å¼•数を使ã†ã“ã¨ã§ã€å±žæ€§ã€ãƒ‡ã‚£ã‚¯ã‚·ãƒ§ãƒŠãƒªã®é …ç›®ã€ç’°å¢ƒå¤‰æ•°ã€ã‚¤ãƒ³ãƒãƒ¼ãƒˆã®ãŸã‚ã® ``sys.path`` ã®å¤‰æ›´ã‚’安全ã«è¿½åŠ ï¼å‰Šé™¤ã™ã‚‹ã®ã‚’支æ´ã—ã¾ã™ã€‚入門記事ã¨ãã®å‹•機付ã‘ã®è­°è«–㯠`monkeypatch ã®ãƒ–ログ記事`_ ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/ .. _`monkeypatch ã®ãƒ–ログ記事`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/ .. Simple example: monkeypatching functions --------------------------------------------------- ç°¡å˜ãªä¾‹: ãƒ¢ãƒ³ã‚­ãƒ¼ãƒ‘ãƒƒãƒæ©Ÿèƒ½ ---------------------------- .. If you want to pretend that ``os.expanduser`` returns a certain directory, you can use the :py:meth:`monkeypatch.setattr` method to patch this function before calling into a function which uses it:: ``os.expanduser`` ãŒç‰¹å®šã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’è¿”ã™ã‚ˆã†ã«ã•ã›ãŸã„å ´åˆã€é–¢æ•°å†…ã§ ``os.expanduser`` ãŒå‘¼ã°ã‚Œã‚‹å‰ã«ã“ã®é–¢æ•°ã¸ãƒ‘ッãƒã‚’当ã¦ã‚‹ãŸã‚ã« :py:meth:`monkeypatch.setattr` メソッドãŒä½¿ãˆã¾ã™:: # test_module.py ã®å†…容 import os.path def getssh(): # 疑似アプリケーションコード return os.path.join(os.path.expanduser("~admin"), '.ssh') def test_mytest(monkeypatch): def mockreturn(path): return '/abc' monkeypatch.setattr(os.path, 'expanduser', mockreturn) x = getssh() assert x == '/abc/.ssh' .. 
Here our test function monkeypatches ``os.path.expanduser`` and then calls into an function that calls it. After the test function finishes the ``os.path.expanduser`` modification will be undone. ã“ã®ãƒ†ã‚¹ãƒˆé–¢æ•°ã¯ ``os.path.expanduser`` ã«ãƒ¢ãƒ³ã‚­ãƒ¼ãƒ‘ッãƒã‚’当ã¦ãŸå¾Œã§ã€ã‚る関数内ã‹ã‚‰ãã®é–¢æ•°ãŒå‘¼ã°ã‚Œã¾ã™ã€‚ã“ã®ãƒ†ã‚¹ãƒˆé–¢æ•°ãŒçµ‚了ã—ãŸå¾Œã§ ``os.path.expanduser`` ã«å¯¾ã™ã‚‹å¤‰æ›´ã¯å…ƒã«æˆ»ã‚Šã¾ã™ã€‚ .. Method reference of the monkeypatch function argument ----------------------------------------------------- 関数ã®å¼•æ•° monkeypatch ã®ãƒ¡ã‚½ãƒƒãƒ‰ãƒªãƒ•ァレンス --------------------------------------------- .. autoclass:: monkeypatch :members: setattr, delattr, setitem, delitem, setenv, delenv, syspath_prepend, chdir, undo .. ``monkeypatch.setattr/delattr/delitem/delenv()`` all by default raise an Exception if the target does not exist. Pass ``raising=False`` if you want to skip this check. ``monkeypatch.setattr/delattr/delitem/delenv()`` ã®å…¨ã¦ã®é–¢æ•°ã«ãŠã„ã¦ã€å¤‰æ›´å¯¾è±¡ãŒå­˜åœ¨ã—ãªã„å ´åˆã«ãƒ‡ãƒ•ォルトã§ä¾‹å¤–を発生ã•ã›ã¾ã™ã€‚ã“ã®ãƒã‚§ãƒƒã‚¯å‡¦ç†ã‚’スキップã—ãŸã„ãªã‚‰ ``raising=False`` を渡ã—ã¦ãã ã•ã„。 pytest-2.5.1/doc/ja/conftest.py0000664000175000017500000000003512254002202015760 0ustar hpkhpk00000000000000collect_ignore = ["conf.py"] pytest-2.5.1/doc/ja/feedback.rst0000664000175000017500000000031512254002202016040 0ustar hpkhpk00000000000000 What users say: `py.test is pretty much the best thing ever`_ (Alex Gaynor) .. _`py.test is pretty much the best thing ever`_ (Alex Gaynor) http://twitter.com/#!/alex_gaynor/status/22389410366 pytest-2.5.1/doc/ja/apiref.txt0000664000175000017500000000067512254002202015602 0ustar hpkhpk00000000000000 .. _apiref: py.test リファレンスドキュメント ================================ .. py.test reference documentation ================================================ .. 
toctree:: :maxdepth: 2 builtin.txt customize.txt assert.txt funcargs.txt xunit_setup.txt capture.txt monkeypatch.txt xdist.txt tmpdir.txt skipping.txt mark.txt recwarn.txt unittest.txt nose.txt doctest.txt pytest-2.5.1/doc/ja/pytest.ini0000664000175000017500000000011212254002202015606 0ustar hpkhpk00000000000000[pytest] # just defined to prevent the root level tox.ini from kicking in pytest-2.5.1/doc/ja/capture.txt0000664000175000017500000001645412254002202016001 0ustar hpkhpk00000000000000 .. _`captures`: 標準出力/標準エラーã®ã‚­ãƒ£ãƒ—ãƒãƒ£ =============================== .. Capturing of the stdout/stderr output ========================================================= .. Default stdout/stderr/stdin capturing behaviour --------------------------------------------------------- デフォルト㮠stdout/stderr/stdin ã®ã‚­ãƒ£ãƒ—ãƒãƒ£å‡¦ç† ------------------------------------------------- .. During test execution any output sent to ``stdout`` and ``stderr`` is captured. If a test or a setup method fails its according captured output will usually be shown along with the failure traceback. テストã®å®Ÿè¡Œä¸­ ``stdout`` 㨠``stderr`` ã¸é€ã‚‰ã‚Œã‚‹å…¨ã¦ã®å‡ºåЛ内容ã¯ã‚­ãƒ£ãƒ—ãƒãƒ£ã•れã¾ã™ã€‚テストã¾ãŸã¯ã‚»ãƒƒãƒˆã‚¢ãƒƒãƒ—メソッドãŒå¤±æ•—ã—ãŸå ´åˆã€ãã“ã§ã‚­ãƒ£ãƒ—ãƒãƒ£ã•れãŸå‡ºåŠ›ã¯ã€é€šå¸¸ã€ã‚¨ãƒ©ãƒ¼ãƒˆãƒ¬ãƒ¼ã‚¹ãƒãƒƒã‚¯ã¨ä¸€ç·’ã«è¡¨ç¤ºã•れã¾ã™ã€‚ .. In addition, ``stdin`` is set to a "null" object which will fail on attempts to read from it because it is rarely desired to wait for interactive input when running automated tests. 加ãˆã¦ ``stdin`` ã¯ã€ãã®èª­ã¿è¾¼ã¿ã«å¤±æ•—ã™ã‚‹ "null" オブジェクトãŒã‚»ãƒƒãƒˆã•れã¾ã™ã€‚ãã®ç†ç”±ã¯è‡ªå‹•テストを実行ã™ã‚‹ã¨ãã«å¯¾è©±å¼ã®å…¥åŠ›ã‚’å¾…ã¤ã®ã‚’考慮ã™ã‚‹ã“ã¨ã¯ã»ã¨ã‚“ã©ãªã„ã‹ã‚‰ã§ã™ã€‚ .. By default capturing is done by intercepting writes to low level file descriptors. This allows to capture output from simple print statements as well as output from a subprocess started by a test. 
デフォルトã®ã‚­ãƒ£ãƒ—ãƒãƒ£ã¯ã€ä½Žãƒ¬ãƒ™ãƒ«ã®ãƒ•ァイルディスクリプタã¸ã®æ›¸ãè¾¼ã¿ã‚’横å–りã—ã¾ã™ã€‚å˜ç´”㪠print æ–‡ã‹ã‚‰ã®å‡ºåŠ›ã‚‚ã€ã‚るテストãŒç”Ÿæˆã—ãŸã‚µãƒ–プロセスã‹ã‚‰ã®å‡ºåŠ›ã‚‚åŒã˜ã‚ˆã†ã«ã‚­ãƒ£ãƒ—ãƒãƒ£ã§ãã¾ã™ã€‚ .. Setting capturing methods or disabling capturing ------------------------------------------------- メソッドをキャプãƒãƒ£ã™ã‚‹ã€ã¾ãŸã¯ç„¡åйã«ã™ã‚‹è¨­å®š ---------------------------------------------- .. There are two ways in which ``py.test`` can perform capturing: ``py.test`` ã§ã‚­ãƒ£ãƒ—ãƒãƒ£ã‚’実行ã™ã‚‹æ–¹æ³•ãŒ2ã¤ã‚りã¾ã™: .. * file descriptor (FD) level capturing (default): All writes going to the operating system file descriptors 1 and 2 will be captured. * ファイルディスクリプタ (FD) レベルã®ã‚­ãƒ£ãƒ—ãƒãƒ£ (デフォルト): オペレーティングシステムã®ãƒ•ァイルディスクリプタ1ã¨2ã¸ã®å…¨ã¦ã®æ›¸ãè¾¼ã¿ã‚’キャプãƒãƒ£ã™ã‚‹ .. * ``sys`` level capturing: Only writes to Python files ``sys.stdout`` and ``sys.stderr`` will be captured. No capturing of writes to filedescriptors is performed. * ``sys`` レベルã®ã‚­ãƒ£ãƒ—ãƒãƒ£: Python ファイル ``sys.stdout`` 㨠``sys.stderr`` ã¸ã®æ›¸ãè¾¼ã¿ã®ã¿ã‚­ãƒ£ãƒ—ãƒãƒ£ã™ã‚‹ã€ãƒ•ァイルディスクリプタã¸ã®æ›¸ãè¾¼ã¿ã¯ã‚­ãƒ£ãƒ—ãƒãƒ£ã—ãªã„ .. You can influence output capturing mechanisms from the command line:: .. _`disable capturing`: コマンドラインã‹ã‚‰å‡ºåЛ内容ã®ã‚­ãƒ£ãƒ—ãƒãƒ£è¨­å®šã‚’制御ã§ãã¾ã™:: py.test -s # å…¨ã¦ã®ã‚­ãƒ£ãƒ—ãƒãƒ£ã‚’無効ã«ã™ã‚‹ py.test --capture=sys # sys.stdout/stderr ã‚’ in-mem ファイルã«ç½®ãæ›ãˆã‚‹ py.test --capture=fd # ファイルディスクリプタ1ã¨2を一時ファイルã«å·®ã—å‘ã‘ã‚‹ .. _printdebugging: デãƒãƒƒã‚°ã« print 文を使ㆠ------------------------- .. Using print statements for debugging --------------------------------------------------- .. One primary benefit of the default capturing of stdout/stderr output is that you can use print statements for debugging:: デフォルト㧠stdout/stderr ã®å‡ºåŠ›ã‚’ã‚­ãƒ£ãƒ—ãƒãƒ£ã™ã‚‹ä¸»ãªåˆ©ç‚¹ã®1ã¤ã¨ã—ã¦ã€ãƒ‡ãƒãƒƒã‚°ã« print æ–‡ãŒä½¿ãˆã¾ã™:: # test_module.py ã®å†…容 def setup_function(function): print ("setting up %s" % function) def test_func1(): assert True def test_func2(): assert False .. 
and running this module will show you precisely the output of the failing function and hide the other one:: ã“ã®ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’実行ã™ã‚‹ã¨ã€å¤±æ•—ã™ã‚‹ãƒ†ã‚¹ãƒˆé–¢æ•°ã®å‡ºåŠ›ã‚’é©åˆ‡ã«è¡¨ç¤ºã—ã¦ã€æˆåŠŸã™ã‚‹ã‚‚ã†1ã¤ã®ãƒ†ã‚¹ãƒˆã‚’éžè¡¨ç¤ºã«ã—ã¾ã™:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 2 items test_module.py .F ================================= FAILURES ================================= ________________________________ test_func2 ________________________________ def test_func2(): > assert False E assert False test_module.py:9: AssertionError ----------------------------- Captured stdout ------------------------------ setting up ==================== 1 failed, 1 passed in 0.01 seconds ==================== .. Accessing captured output from a test function --------------------------------------------------- テスト関数ã‹ã‚‰ã‚­ãƒ£ãƒ—ãƒãƒ£ã•れãŸå‡ºåŠ›ã¸ã®ã‚¢ã‚¯ã‚»ã‚¹ ---------------------------------------------- .. The :ref:`funcarg mechanism` allows test function a very easy way to access the captured output by simply using the names ``capsys`` or ``capfd`` in the test function signature. Here is an example test function that performs some output related checks:: :ref:`funcarg mechanism` ã«ã‚ˆã‚Šã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã®ã‚·ã‚°ãƒãƒãƒ£ã« ``capsys`` ã¾ãŸã¯ ``capfd`` ã¨ã„ã†åå‰ã‚’使ã†ã ã‘ã§ã€ç°¡å˜ã«ã‚­ãƒ£ãƒ—ãƒãƒ£ã•れãŸå‡ºåŠ›ã¸ã‚¢ã‚¯ã‚»ã‚¹ã§ãã¾ã™ã€‚次ã«é–¢é€£ã™ã‚‹å€¤ã®ç¢ºèªã‚’行ã†ãƒ†ã‚¹ãƒˆé–¢æ•°ã®ã‚µãƒ³ãƒ—ルを紹介ã—ã¾ã™:: def test_myoutput(capsys): # ã¾ãŸã¯ fd レベル㮠"capfd" を使ㆠprint ("hello") sys.stderr.write("world\n") out, err = capsys.readouterr() assert out == "hello\n" assert err == "world\n" print "next" out, err = capsys.readouterr() assert out == "next\n" .. The ``readouterr()`` call snapshots the output so far - and capturing will be continued. After the test function finishes the original streams will be restored. 
Using ``capsys`` this way frees your test from having to care about setting/resetting output streams and also interacts well with py.test's own per-test capturing. ``readouterr()`` 呼ã³å‡ºã—ã¯ã€ãã®æ™‚点ã§ã®å‡ºåЛ内容ã®ã‚¹ãƒŠãƒƒãƒ—ショットを返ã—ã€ãã®å¾Œã‚‚キャプãƒãƒ£ãŒç¶šè¡Œã•れã¾ã™ã€‚テスト関数ãŒçµ‚了ã—ãŸå¾Œã€å…ƒã®ã‚¹ãƒˆãƒªãƒ¼ãƒ ãŒå¾©å…ƒã•れã¾ã™ã€‚ ``capsys`` を使ã†ã“ã¨ã§ã€ãƒ†ã‚¹ãƒˆå†…ã§å‡ºåŠ›ã‚¹ãƒˆãƒªãƒ¼ãƒ ã‚’ã‚»ãƒƒãƒˆ/リセットã™ã‚‹ã“ã¨ã«æ³¨æ„を払ã‚ãªãã¦ã‚ˆããªã‚Šã¾ã™ã€‚ã¾ãŸã€pytest ãŒä¿æŒã™ã‚‹ãƒ†ã‚¹ãƒˆå˜ä½ã®ã‚­ãƒ£ãƒ—ãƒãƒ£ã‚‚扱ãˆã¾ã™ã€‚ .. If you want to capture on ``fd`` level you can use the ``capfd`` function argument which offers the exact same interface. ``fd`` レベルã®ã‚­ãƒ£ãƒ—ãƒãƒ£ã‚’行ã†å ´åˆã‚‚å…¨ãåŒã˜ã‚¤ãƒ³ã‚¿ãƒ¼ãƒ•ェースをæä¾›ã™ã‚‹ ``capfd`` ã¨ã„ã†é–¢æ•°ã®å¼•数を使ã„ã¾ã™ã€‚ .. include:: links.inc pytest-2.5.1/doc/ja/doctest.txt0000664000175000017500000000466312254002202016002 0ustar hpkhpk00000000000000 .. Doctest integration for modules and test files ========================================================= モジュールやテストファイル㮠doctest ==================================== .. By default all files matching the ``test*.txt`` pattern will be run through the python standard ``doctest`` module. You can change the pattern by issuing:: デフォルト㧠``test*.txt`` ã®ãƒ‘ターンã«ä¸€è‡´ã™ã‚‹å…¨ã¦ã®ãƒ•ァイルã¯ã€Python 標準㮠``doctest`` モジュールã§å®Ÿè¡Œã•れã¾ã™ã€‚次ã®ã‚ˆã†ã«ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã§ã“ã®ãƒ‘ターンを変更ã§ãã¾ã™:: py.test --doctest-glob='*.rst' .. on the command line. You can also trigger running of doctests from docstrings in all python modules (including regular python test modules):: Python モジュール (通常 python テストモジュールをå«ã‚€) ã® docstring ã‹ã‚‰ã‚‚ doctest を実行ã§ãã¾ã™:: py.test --doctest-modules .. You can make these changes permanent in your project by putting them into a pytest.ini file like this:: 次ã®ã‚ˆã†ã« pytest.ini ã«ãã®è¨­å®šã‚’追加ã™ã‚‹ã“ã¨ã§ã€è‡ªåˆ†ã®ãƒ—ロジェクトã§ãã†ã„ã£ãŸå¤‰æ›´ã‚’永続化ã§ãã¾ã™:: # pytest.ini ã®å†…容 [pytest] addopts = --doctest-modules .. 
If you then have a text file like this:: 次ã®ã‚ˆã†ãªãƒ†ã‚­ã‚¹ãƒˆãƒ•ァイルãŒå­˜åœ¨ã—ã¦:: # example.rst ã®å†…容 hello this is a doctest >>> x = 3 >>> x 3 .. and another like this:: ä»–ã«ã‚‚次ã®ã‚ˆã†ãªãƒ•ァイルも存在ã™ã‚‹ã¨ã—ã¾ã™:: # mymodule.py ã®å†…容 def something(): """ a doctest in a docstring >>> something() 42 """ return 42 .. then you can just invoke ``py.test`` without command line options:: コマンドラインオプションを指定ã›ãš ``py.test`` を実行ã™ã‚‹ã ã‘ã§ã™:: $ py.test =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 1 items mymodule.py . ========================= 1 passed in 0.02 seconds ========================= .. It is possible to use fixtures using the ``getfixture`` helper:: ãれ㯠``getfixture`` ヘルパーを使ã£ã¦ãƒ•ィクスãƒãƒ£ã‚’使用ã™ã‚‹ã“ã¨ãŒå¯èƒ½ã§ã‚ã‚‹:: # content of example.rst >>> tmp = getfixture('tmpdir') >>> ... pytest-2.5.1/doc/ja/contact.txt0000664000175000017500000000524012254002202015760 0ustar hpkhpk00000000000000 .. _`contact channels`: .. _`contact`: 連絡先 ====== .. Contact channels =================================== .. - `new issue tracker`_ to report bugs or suggest features (for version 2.0 and above). You may also peek at the `old issue tracker`_ but please don't submit bugs there anymore. - ãƒã‚°å ±å‘Šã‚„æ©Ÿèƒ½ææ¡ˆã¯ `新イシュートラッカー`_ を使ã£ã¦ãã ã•ã„ (ãƒãƒ¼ã‚¸ãƒ§ãƒ³ 2.0 以上) 。 `旧イシュートラッカー`_ を覗ãã“ã¨ã‚‚ã‚ã‚‹ã‹ã‚‚ã—れã¾ã›ã‚“ãŒã€ãã“ã«ã¯ç™»éŒ²ã—ãªã„よã†ã«ã—ã¦ãã ã•ã„。 .. - `Testing In Python`_: a mailing list for Python testing tools and discussion. - `Python ã«ãŠã‘るテスト`_ 㯠Python ã®ãƒ†ã‚¹ãƒˆãƒ„ールã¨ãã®è­°è«–ã®ãŸã‚ã®ãƒ¡ãƒ¼ãƒªãƒ³ã‚°ãƒªã‚¹ãƒˆã§ã™ã€‚ .. - `py-dev developers list`_ pytest specific announcements and discussions. - `py-dev 開発者ã®ãƒ¡ãƒ¼ãƒªãƒ³ã‚°ãƒªã‚¹ãƒˆ`_ 㯠pytest ã«ç‰¹åŒ–ã—ãŸã‚¢ãƒŠã‚¦ãƒ³ã‚¹ã¨è­°è«–ã®ãƒ¡ãƒ¼ãƒªãƒ³ã‚°ãƒªã‚¹ãƒˆã§ã™ã€‚ .. - #pylib on irc.freenode.net IRC channel for random questions. - ã‚れã“れèžããŸã‚ã® irc.freenode.net IRC ãƒãƒ£ãƒãƒ«ã¯ #pylib ã§ã™ã€‚ .. 
- private mail to Holger.Krekel at gmail com if you want to communicate sensitive issues - æ…Žé‡ãªå¯¾å¿œã‚’求ã‚ã‚‹å•題ãŒã‚ã‚‹ãªã‚‰ Holger.Krekel at gmail com å®›ã¸ãƒ¡ãƒ¼ãƒ«ã—ã¦ãã ã•ã„。 .. - `commit mailing list`_ - `コミットメーリングリスト`_ ã§ã™ã€‚ .. - `merlinux.eu`_ offers on-site teaching and consulting services. - `merlinux.eu`_ ã¯ã‚ªãƒ³ã‚µã‚¤ãƒˆæ•™è‚²ã¨ã‚³ãƒ³ã‚µãƒ«ãƒ†ã‚£ãƒ³ã‚°ã‚µãƒ¼ãƒ“スをæä¾›ã—ã¦ã„ã¾ã™ã€‚ .. _`new issue tracker`: http://bitbucket.org/hpk42/pytest/issues/ .. _`新イシュートラッカー`: http://bitbucket.org/hpk42/pytest/issues/ .. _`old issue tracker`: http://bitbucket.org/hpk42/py-trunk/issues/ .. _`旧イシュートラッカー`: http://bitbucket.org/hpk42/py-trunk/issues/ .. _`merlinux.eu`: http://merlinux.eu .. _`get an account`: .. _tetamap: http://tetamap.wordpress.com .. _`@pylibcommit`: http://twitter.com/pylibcommit .. _`Testing in Python`: http://lists.idyll.org/listinfo/testing-in-python .. _`Python ã«ãŠã‘るテスト`: http://lists.idyll.org/listinfo/testing-in-python .. _FOAF: http://en.wikipedia.org/wiki/FOAF .. _`py-dev`: .. _`development mailing list`: .. _`py-dev developers list`: http://codespeak.net/mailman/listinfo/py-dev .. _`py-dev 開発者ã®ãƒ¡ãƒ¼ãƒªãƒ³ã‚°ãƒªã‚¹ãƒˆ`: http://codespeak.net/mailman/listinfo/py-dev .. _`py-svn`: .. _`commit mailing list`: http://codespeak.net/mailman/listinfo/py-svn .. _`コミットメーリングリスト`: http://codespeak.net/mailman/listinfo/py-svn pytest-2.5.1/doc/ja/announce/0000775000175000017500000000000012254002202015371 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/announce/release-2.1.3.txt0000664000175000017500000000226512254002202020216 0ustar hpkhpk00000000000000py.test 2.1.3: just some more fixes =========================================================================== pytest-2.1.3 is a minor backward compatible maintenance release of the popular py.test testing tool. It is commonly used for unit, functional- and integration testing. 
See extensive docs with examples here: http://pytest.org/ The release contains another fix to the perfected assertions introduced with the 2.1 series as well as the new possibility to customize reporting for assertion expressions on a per-directory level. If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest Thanks to the bug reporters and to Ronny Pfannschmidt, Benjamin Peterson and Floris Bruynooghe who implemented the fixes. best, holger krekel Changes between 2.1.2 and 2.1.3 ---------------------------------------- - fix issue79: assertion rewriting failed on some comparisons in boolops, - correctly handle zero length arguments (a la pytest '') - fix issue67 / junitxml now contains correct test durations - fix issue75 / skipping test failure on jython - fix issue77 / Allow assertrepr_compare hook to apply to a subset of tests pytest-2.5.1/doc/ja/announce/index.txt0000664000175000017500000000055212254002202017243 0ustar hpkhpk00000000000000 .. Release announcements =========================================== リリースアナウンス ================== .. toctree:: :maxdepth: 2 release-2.2.4 release-2.2.2 release-2.2.1 release-2.2.0 release-2.1.3 release-2.1.2 release-2.1.1 release-2.1.0 release-2.0.3 release-2.0.2 release-2.0.1 release-2.0.0 pytest-2.5.1/doc/ja/announce/release-2.2.0.txt0000664000175000017500000001034712254002202020214 0ustar hpkhpk00000000000000py.test 2.2.0: test marking++, parametrization++ and duration profiling =========================================================================== pytest-2.2.0 is a test-suite compatible release of the popular py.test testing tool. Plugins might need upgrades. 
It comes with these improvements: * easier and more powerful parametrization of tests: - new @pytest.mark.parametrize decorator to run tests with different arguments - new metafunc.parametrize() API for parametrizing arguments independently - see examples at http://pytest.org/latest/example/parametrize.html - NOTE that parametrize() related APIs are still a bit experimental and might change in future releases. * improved handling of test markers and refined marking mechanism: - "-m markexpr" option for selecting tests according to their mark - a new "markers" ini-variable for registering test markers for your project - the new "--strict" bails out with an error if using unregistered markers. - see examples at http://pytest.org/latest/example/markers.html * duration profiling: new "--duration=N" option showing the N slowest test execution or setup/teardown calls. This is most useful if you want to find out where your slowest test code is. * also 2.2.0 performs more eager calling of teardown/finalizers functions resulting in better and more accurate reporting when they fail Besides there is the usual set of bug fixes along with a cleanup of pytest's own test suite allowing it to run on a wider range of environments. For general information, see extensive docs with examples here: http://pytest.org/ If you want to install or upgrade pytest you might just type:: pip install -U pytest # or easy_install -U pytest Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, Alfredo Deza and all who gave feedback or sent bug reports. best, holger krekel notes on incompatibility ------------------------------ While test suites should work unchanged you might need to upgrade plugins: * You need a new version of the pytest-xdist plugin (1.7) for distributing test runs. * Other plugins might need an upgrade if they implement the ``pytest_runtest_logreport`` hook which now is called unconditionally for the setup/teardown fixture phases of a test. 
You may choose to ignore setup/teardown failures by inserting "if rep.when != 'call': return" or something similar. Note that most code probably "just" works because the hook was already called for failing setup/teardown phases of a test so a plugin should have been ready to grok such reports already. Changes between 2.1.3 and 2.2.0 ---------------------------------------- - fix issue90: introduce eager tearing down of test items so that teardown function are called earlier. - add an all-powerful metafunc.parametrize function which allows to parametrize test function arguments in multiple steps and therefore from independent plugins and places. - add a @pytest.mark.parametrize helper which allows to easily call a test function with different argument values. - Add examples to the "parametrize" example page, including a quick port of Test scenarios and the new parametrize function and decorator. - introduce registration for "pytest.mark.*" helpers via ini-files or through plugin hooks. Also introduce a "--strict" option which will treat unregistered markers as errors allowing to avoid typos and maintain a well described set of markers for your test suite. See examples at http://pytest.org/latest/mark.html and its links. - issue50: introduce "-m marker" option to select tests based on markers (this is a stricter and more predictable version of "-k" in that "-m" only matches complete markers and has more obvious rules for and/or semantics. - new feature to help optimizing the speed of your tests: --durations=N option for displaying N slowest test calls and setup/teardown methods. 
- fix issue87: --pastebin now works with python3 - fix issue89: --pdb with unexpected exceptions in doctest work more sensibly - fix and cleanup pytest's own test suite to not leak FDs - fix issue83: link to generated funcarg list - fix issue74: pyarg module names are now checked against imp.find_module false positives - fix compatibility with twisted/trial-11.1.0 use cases pytest-2.5.1/doc/ja/announce/release-2.2.1.txt0000664000175000017500000000303112254002202020205 0ustar hpkhpk00000000000000pytest-2.2.1: bug fixes, perfect teardowns =========================================================================== pytest-2.2.1 is a minor backward-compatible release of the the py.test testing tool. It contains bug fixes and little improvements, including documentation fixes. If you are using the distributed testing pluginmake sure to upgrade it to pytest-xdist-1.8. For general information see here: http://pytest.org/ To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest Special thanks for helping on this release to Ronny Pfannschmidt, Jurko Gospodnetic and Ralf Schmitt. best, holger krekel Changes between 2.2.0 and 2.2.1 ---------------------------------------- - fix issue99 (in pytest and py) internallerrors with resultlog now produce better output - fixed by normalizing pytest_internalerror input arguments. - fix issue97 / traceback issues (in pytest and py) improve traceback output in conjunction with jinja2 and cython which hack tracebacks - fix issue93 (in pytest and pytest-xdist) avoid "delayed teardowns": the final test in a test node will now run its teardown directly instead of waiting for the end of the session. Thanks Dave Hunt for the good reporting and feedback. The pytest_runtest_protocol as well as the pytest_runtest_teardown hooks now have "nextitem" available which will be None indicating the end of the test run. 
- fix collection crash due to unknown-source collected items, thanks to Ralf Schmitt (fixed by depending on a more recent pylib) pytest-2.5.1/doc/ja/announce/release-2.2.4.txt0000664000175000017500000000266012254002202020217 0ustar hpkhpk00000000000000pytest-2.2.4: bug fixes, better junitxml/unittest/python3 compat =========================================================================== pytest-2.2.4 is a minor backward-compatible release of the versatile py.test testing tool. It contains bug fixes and a few refinements to junitxml reporting, better unittest- and python3 compatibility. For general information see here: http://pytest.org/ To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest Special thanks for helping on this release to Ronny Pfannschmidt and Benjamin Peterson and the contributors of issues. best, holger krekel Changes between 2.2.3 and 2.2.4 ----------------------------------- - fix error message for rewritten assertions involving the % operator - fix issue 126: correctly match all invalid xml characters for junitxml binary escape - fix issue with unittest: now @unittest.expectedFailure markers should be processed correctly (you can also use @pytest.mark markers) - document integration with the extended distribute/setuptools test commands - fix issue 140: propperly get the real functions of bound classmethods for setup/teardown_class - fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net - fix issue #143: call unconfigure/sessionfinish always when configure/sessionstart where called - fix issue #144: better mangle test ids to junitxml classnames - upgrade distribute_setup.py to 0.6.27 pytest-2.5.1/doc/ja/announce/release-2.0.0.txt0000664000175000017500000001203712254002202020210 0ustar hpkhpk00000000000000py.test 2.0.0: asserts++, unittest++, reporting++, config++, docs++ =========================================================================== Welcome to pytest-2.0.0, a major new release of 
"py.test", the rapid easy Python testing tool. There are many new features and enhancements, see below for summary and detailed lists. A lot of long-deprecated code has been removed, resulting in a much smaller and cleaner implementation. See the new docs with examples here: http://pytest.org/2.0.0/index.html A note on packaging: pytest used to part of the "py" distribution up until version py-1.3.4 but this has changed now: pytest-2.0.0 only contains py.test related code and is expected to be backward-compatible to existing test code. If you want to install pytest, just type one of:: pip install -U pytest easy_install -U pytest Many thanks to all issue reporters and people asking questions or complaining. Particular thanks to Floris Bruynooghe and Ronny Pfannschmidt for their great coding contributions and many others for feedback and help. best, holger krekel New Features ----------------------- - new invocations through Python interpreter and from Python:: python -m pytest # on all pythons >= 2.5 or from a python program:: import pytest ; pytest.main(arglist, pluginlist) see http://pytest.org/2.0.0/usage.html for details. - new and better reporting information in assert expressions if comparing lists, sequences or strings. see http://pytest.org/2.0.0/assert.html#newreport - new configuration through ini-files (setup.cfg or tox.ini recognized), for example:: [pytest] norecursedirs = .hg data* # don't ever recurse in such dirs addopts = -x --pyargs # add these command line options by default see http://pytest.org/2.0.0/customize.html - improved standard unittest support. In general py.test should now better be able to run custom unittest.TestCases like twisted trial or Django based TestCases. Also you can now run the tests of an installed 'unittest' package with py.test:: py.test --pyargs unittest - new "-q" option which decreases verbosity and prints a more nose/unittest-style "dot" output. 
- many many more detailed improvements details Fixes ----------------------- - fix issue126 - introduce py.test.set_trace() to trace execution via PDB during the running of tests even if capturing is ongoing. - fix issue124 - make reporting more resilient against tests opening files on filedescriptor 1 (stdout). - fix issue109 - sibling conftest.py files will not be loaded. (and Directory collectors cannot be customized anymore from a Directory's conftest.py - this needs to happen at least one level up). - fix issue88 (finding custom test nodes from command line arg) - fix issue93 stdout/stderr is captured while importing conftest.py - fix bug: unittest collected functions now also can have "pytestmark" applied at class/module level Important Notes -------------------- * The usual way in pre-2.0 times to use py.test in python code was to import "py" and then e.g. use "py.test.raises" for the helper. This remains valid and is not planned to be deprecated. However, in most examples and internal code you'll find "import pytest" and "pytest.raises" used as the recommended default way. * pytest now first performs collection of the complete test suite before running any test. This changes for example the semantics of when pytest_collectstart/pytest_collectreport are called. Some plugins may need upgrading. * The pytest package consists of a 400 LOC core.py and about 20 builtin plugins, summing up to roughly 5000 LOCs, including docstrings. To be fair, it also uses generic code from the "pylib", and the new "py" package to help with filesystem and introspection/code manipulation. (Incompatible) Removals ----------------------------- - py.test.config is now only available if you are in a test run. - the following (mostly already deprecated) functionality was removed: - removed support for Module/Class/... collection node definitions in conftest.py files. They will cause nothing special. 
- removed support for calling the pre-1.0 collection API of "run()" and "join" - removed reading option values from conftest.py files or env variables. This can now be done much much better and easier through the ini-file mechanism and the "addopts" entry in particular. - removed the "disabled" attribute in test classes. Use the skipping and pytestmark mechanism to skip or xfail a test class. - py.test.collect.Directory does not exist anymore and it is not possible to provide an own "Directory" object. If you have used this and don't know what to do, get in contact. We'll figure something out. Note that pytest_collect_directory() is still called but any return value will be ignored. This allows to keep old code working that performed for example "py.test.skip()" in collect() to prevent recursion into directory trees if a certain dependency or command line option is missing. see :ref:`changelog` for more detailed changes. pytest-2.5.1/doc/ja/announce/release-2.1.1.txt0000664000175000017500000000305412254002202020211 0ustar hpkhpk00000000000000py.test 2.1.1: assertion fixes and improved junitxml output =========================================================================== pytest-2.1.1 is a backward compatible maintenance release of the popular py.test testing tool. See extensive docs with examples here: http://pytest.org/ Most bug fixes address remaining issues with the perfected assertions introduced with 2.1.0 - many thanks to the bug reporters and to Benjamin Peterson for helping to fix them. Also, junitxml output now produces system-out/err tags which lead to better displays of tracebacks with Jenkins. Also a quick note to package maintainers and others interested: there now is a "pytest" man page which can be generated with "make man" in doc/. 
If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest best, holger krekel / http://merlinux.eu Changes between 2.1.0 and 2.1.1 ---------------------------------------------- - fix issue64 / pytest.set_trace now works within pytest_generate_tests hooks - fix issue60 / fix error conditions involving the creation of __pycache__ - fix issue63 / assertion rewriting on inserts involving strings containing '%' - fix assertion rewriting on calls with a ** arg - don't cache rewritten modules if bytecode generation is disabled - fix assertion rewriting in read-only directories - fix issue59: provide system-out/err tags for junitxml output - fix issue61: assertion rewriting on boolean operations with 3 or more operands - you can now build a man page with "cd doc ; make man" pytest-2.5.1/doc/ja/announce/release-2.1.2.txt0000664000175000017500000000240512254002202020211 0ustar hpkhpk00000000000000py.test 2.1.2: bug fixes and fixes for jython =========================================================================== pytest-2.1.2 is a minor backward compatible maintenance release of the popular py.test testing tool. pytest is commonly used for unit, functional- and integration testing. See extensive docs with examples here: http://pytest.org/ Most bug fixes address remaining issues with the perfected assertions introduced in the 2.1 series - many thanks to the bug reporters and to Benjamin Peterson for helping to fix them. pytest should also work better with Jython-2.5.1 (and Jython trunk). 
If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest best, holger krekel / http://merlinux.eu Changes between 2.1.1 and 2.1.2 ---------------------------------------- - fix assertion rewriting on files with windows newlines on some Python versions - refine test discovery by package/module name (--pyargs), thanks Florian Mayer - fix issue69 / assertion rewriting fixed on some boolean operations - fix issue68 / packages now work with assertion rewriting - fix issue66: use different assertion rewriting caches when the -O option is passed - don't try assertion rewriting on Jython, use reinterp pytest-2.5.1/doc/ja/announce/release-2.1.0.txt0000664000175000017500000000415112254002202020207 0ustar hpkhpk00000000000000py.test 2.1.0: perfected assertions and bug fixes =========================================================================== Welcome to the release of pytest-2.1, a mature testing tool for Python, supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See the improved extensive docs (now also as PDF!) with tested examples here: http://pytest.org/ The single biggest news about this release are **perfected assertions** courtesy of Benjamin Peterson. You can now safely use ``assert`` statements in test modules without having to worry about side effects or python optimization ("-OO") options. This is achieved by rewriting assert statements in test modules upon import, using a PEP302 hook. See http://pytest.org/assert.html#advanced-assertion-introspection for detailed information. The work has been partly sponsored by my company, merlinux GmbH. For further details on bug fixes and smaller enhancements see below. 
If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest best, holger krekel / http://merlinux.eu Changes between 2.0.3 and 2.1.0 ---------------------------------------------- - fix issue53 call nosestyle setup functions with correct ordering - fix issue58 and issue59: new assertion code fixes - merge Benjamin's assertionrewrite branch: now assertions for test modules on python 2.6 and above are done by rewriting the AST and saving the pyc file before the test module is imported. see doc/assert.txt for more info. - fix issue43: improve doctests with better traceback reporting on unexpected exceptions - fix issue47: timing output in junitxml for test cases is now correct - fix issue48: typo in MarkInfo repr leading to exception - fix issue49: avoid confusing error when initialization partially fails - fix issue44: env/username expansion for junitxml file path - show releaselevel information in test runs for pypy - reworked doc pages for better navigation and PDF generation - report KeyboardInterrupt even if interrupted during session startup - fix issue 35 - provide PDF doc version and download link from index page pytest-2.5.1/doc/ja/announce/release-2.0.3.txt0000664000175000017500000000243412254002202020213 0ustar hpkhpk00000000000000py.test 2.0.3: bug fixes and speed ups =========================================================================== Welcome to pytest-2.0.3, a maintenance and bug fix release of pytest, a mature testing tool for Python, supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See the extensive docs with tested examples here: http://pytest.org/ If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest There also is a bugfix release 1.6 of pytest-xdist, the plugin that enables seemless distributed and "looponfail" testing for Python. 
best, holger krekel Changes between 2.0.2 and 2.0.3 ---------------------------------------------- - fix issue38: nicer tracebacks on calls to hooks, particularly early configure/sessionstart ones - fix missing skip reason/meta information in junitxml files, reported via http://lists.idyll.org/pipermail/testing-in-python/2011-March/003928.html - fix issue34: avoid collection failure with "test" prefixed classes deriving from object. - don't require zlib (and other libs) for genscript plugin without --genscript actually being used. - speed up skips (by not doing a full traceback represenation internally) - fix issue37: avoid invalid characters in junitxml's output pytest-2.5.1/doc/ja/announce/release-2.2.2.txt0000664000175000017500000000312712254002202020214 0ustar hpkhpk00000000000000pytest-2.2.2: bug fixes =========================================================================== pytest-2.2.2 (updated to 2.2.3 to fix packaging issues) is a minor backward-compatible release of the versatile py.test testing tool. It contains bug fixes and a few refinements particularly to reporting with "--collectonly", see below for betails. For general information see here: http://pytest.org/ To install or upgrade pytest: pip install -U pytest # or easy_install -U pytest Special thanks for helping on this release to Ronny Pfannschmidt and Ralf Schmitt and the contributors of issues. best, holger krekel Changes between 2.2.1 and 2.2.2 ---------------------------------------- - fix issue101: wrong args to unittest.TestCase test function now produce better output - fix issue102: report more useful errors and hints for when a test directory was renamed and some pyc/__pycache__ remain - fix issue106: allow parametrize to be applied multiple times e.g. from module, class and at function level. 
- fix issue107: actually perform session scope finalization - don't check in parametrize if indirect parameters are funcarg names - add chdir method to monkeypatch funcarg - fix crash resulting from calling monkeypatch undo a second time - fix issue115: make --collectonly robust against early failure (missing files/directories) - "-qq --collectonly" now shows only files and the number of tests in them - "-q --collectonly" now shows test ids - allow adding of attributes to test reports such that it also works with distributed testing (no upgrade of pytest-xdist needed) pytest-2.5.1/doc/ja/announce/release-2.0.2.txt0000664000175000017500000000535512254002202020217 0ustar hpkhpk00000000000000py.test 2.0.2: bug fixes, improved xfail/skip expressions, speed ups =========================================================================== Welcome to pytest-2.0.2, a maintenance and bug fix release of pytest, a mature testing tool for Python, supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See the extensive docs with tested examples here: http://pytest.org/ If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest Many thanks to all issue reporters and people asking questions or complaining, particularly Jurko for his insistence, Laura, Victor and Brianna for helping with improving and Ronny for his general advise. best, holger krekel Changes between 2.0.1 and 2.0.2 ---------------------------------------------- - tackle issue32 - speed up test runs of very quick test functions by reducing the relative overhead - fix issue30 - extended xfail/skipif handling and improved reporting. If you have a syntax error in your skip/xfail expressions you now get nice error reports. 
Also you can now access module globals from xfail/skipif expressions so that this for example works now:: import pytest import mymodule @pytest.mark.skipif("mymodule.__version__[0] == "1") def test_function(): pass This will not run the test function if the module's version string does not start with a "1". Note that specifying a string instead of a boolean expressions allows py.test to report meaningful information when summarizing a test run as to what conditions lead to skipping (or xfail-ing) tests. - fix issue28 - setup_method and pytest_generate_tests work together The setup_method fixture method now gets called also for test function invocations generated from the pytest_generate_tests hook. - fix issue27 - collectonly and keyword-selection (-k) now work together Also, if you do "py.test --collectonly -q" you now get a flat list of test ids that you can use to paste to the py.test commandline in order to execute a particular test. - fix issue25 avoid reported problems with --pdb and python3.2/encodings output - fix issue23 - tmpdir argument now works on Python3.2 and WindowsXP Starting with Python3.2 os.symlink may be supported. By requiring a newer py lib version the py.path.local() implementation acknowledges this. - fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and particular thanks to Laura Creighton who also revieved parts of the documentation. 
- fix slighly wrong output of verbose progress reporting for classes (thanks Amaury) - more precise (avoiding of) deprecation warnings for node.Class|Function accesses - avoid std unittest assertion helper code in tracebacks (thanks Ronny) pytest-2.5.1/doc/ja/announce/release-2.0.1.txt0000664000175000017500000000601212254002202020205 0ustar hpkhpk00000000000000py.test 2.0.1: bug fixes =========================================================================== Welcome to pytest-2.0.1, a maintenance and bug fix release of pytest, a mature testing tool for Python, supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See extensive docs with tested examples here: http://pytest.org/ If you want to install or upgrade pytest, just type one of:: pip install -U pytest # or easy_install -U pytest Many thanks to all issue reporters and people asking questions or complaining. Particular thanks to Floris Bruynooghe and Ronny Pfannschmidt for their great coding contributions and many others for feedback and help. best, holger krekel Changes between 2.0.0 and 2.0.1 ---------------------------------------------- - refine and unify initial capturing so that it works nicely even if the logging module is used on an early-loaded conftest.py file or plugin. - fix issue12 - show plugin versions with "--version" and "--traceconfig" and also document how to add extra information to reporting test header - fix issue17 (import-* reporting issue on python3) by requiring py>1.4.0 (1.4.1 is going to include it) - fix issue10 (numpy arrays truth checking) by refining assertion interpretation in py lib - fix issue15: make nose compatibility tests compatible with python3 (now that nose-1.0 supports python3) - remove somewhat surprising "same-conftest" detection because it ignores conftest.py when they appear in several subdirs. 
- improve assertions ("not in"), thanks Floris Bruynooghe - improve behaviour/warnings when running on top of "python -OO" (assertions and docstrings are turned off, leading to potential false positives) - introduce a pytest_cmdline_processargs(args) hook to allow dynamic computation of command line arguments. This fixes a regression because py.test prior to 2.0 allowed to set command line options from conftest.py files which so far pytest-2.0 only allowed from ini-files now. - fix issue7: assert failures in doctest modules. unexpected failures in doctests will not generally show nicer, i.e. within the doctest failing context. - fix issue9: setup/teardown functions for an xfail-marked test will report as xfail if they fail but report as normally passing (not xpassing) if they succeed. This only is true for "direct" setup/teardown invocations because teardown_class/ teardown_module cannot closely relate to a single test. - fix issue14: no logging errors at process exit - refinements to "collecting" output on non-ttys - refine internal plugin registration and --traceconfig output - introduce a mechanism to prevent/unregister plugins from the command line, see http://pytest.org/plugins.html#cmdunregister - activate resultlog plugin by default - fix regression wrt yielded tests which due to the collection-before-running semantics were not setup as with pytest 1.3.4. Note, however, that the recommended and much cleaner way to do test parametrization remains the "pytest_generate_tests" mechanism, see the docs. pytest-2.5.1/doc/ja/tmpdir.txt0000664000175000017500000000754612254002202015637 0ustar hpkhpk00000000000000 .. _`tmpdir handling`: 一時ディレクトリã¨ãƒ•ァイル ========================== .. Temporary directories and files ================================================ .. The 'tmpdir' test function argument ----------------------------------- テスト関数ã®å¼•æ•° 'tmpdir' ------------------------- .. 
You can use the ``tmpdir`` function argument which will provide a temporary directory unique to the test invocation, created in the `base temporary directory`_. ``tmpdir`` ã¨ã„ã†é–¢æ•°ã®å¼•数を一æ„ãªä¸€æ™‚ディレクトリをæä¾›ã™ã‚‹ã®ã«ä½¿ãˆã¾ã™ã€‚ãれ㯠:ref:`base temporary directory` ã«ä½œæˆã•れã¾ã™ã€‚ .. ``tmpdir`` is a `py.path.local`_ object which offers ``os.path`` methods and more. Here is an example test usage:: ``tmpdir`` 㯠``os.path`` メソッドやã•らã«ä»–ã®ãƒ¡ã‚½ãƒƒãƒ‰ã‚’æä¾›ã™ã‚‹ `py.path.local`_ オブジェクトã§ã™ã€‚次ã«ãƒ†ã‚¹ãƒˆã§ã®ä½¿ç”¨ä¾‹ã‚’紹介ã—ã¾ã™:: # test_tmpdir.py ã®å†…容 import os def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") p.write("content") assert p.read() == "content" assert len(tmpdir.listdir()) == 1 assert 0 .. Running this would result in a passed test except for the last ``assert 0`` line which we use to look at values:: ã“ã®ãƒ†ã‚¹ãƒˆã‚’実行ã™ã‚‹ã¨ã€æœ€çµ‚行㮠``assert 0`` ãŒå¤±æ•—ã—㦠``tmpdir`` ã®å€¤ãŒè¦‹ãˆã¾ã™:: $ py.test test_tmpdir.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 1 items test_tmpdir.py F ================================= FAILURES ================================= _____________________________ test_create_file _____________________________ tmpdir = local('/tmp/pytest-23/test_create_file0') def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") p.write("content") assert p.read() == "content" assert len(tmpdir.listdir()) == 1 > assert 0 E assert 0 test_tmpdir.py:7: AssertionError ========================= 1 failed in 0.02 seconds ========================= .. _`base temporary directory`: デフォルトã®ä¸€æ™‚ディレクトリ ---------------------------- .. The default base temporary directory ----------------------------------------------- .. Temporary directories are by default created as sub-directories of the system temporary directory. The base name will be ``pytest-NUM`` where ``NUM`` will be incremented with each test run. 
Moreover, entries older than 3 temporary directories will be removed. デフォルトã§ã¯ã€ãƒ†ã‚¹ãƒˆå‘ã‘ã®ä¸€æ™‚ディレクトリã¯ã€ã‚·ã‚¹ãƒ†ãƒ ã®ä¸€æ™‚ディレクトリã®ã‚µãƒ–ディレクトリã¨ã—ã¦ä½œæˆã•れã¾ã™ã€‚基本ã¨ãªã‚‹åå‰ã¯ ``pytest-NUM`` ã¨ãªã‚Š ``NUM`` ã¯ãƒ†ã‚¹ãƒˆãŒå®Ÿè¡Œã•ã‚Œã‚‹åº¦ã«æ•°å­—ãŒå¢—ãˆã¾ã™ã€‚ã¾ãŸã€3世代よりå¤ã„一時ディレクトリã¯å‰Šé™¤ã•れã¾ã™ã€‚ .. You can override the default temporary directory setting like this:: デフォルトã®ä¸€æ™‚ディレクトリã®è¨­å®šã¯æ¬¡ã®ã‚ˆã†ã«æ›¸ãæ›ãˆã‚‰ã‚Œã¾ã™:: py.test --basetemp=mydir .. When distributing tests on the local machine, ``py.test`` takes care to configure a basetemp directory for the sub processes such that all temporary data lands below a single per-test run basetemp directory. ``py.test`` ã¯ã€ãƒ­ãƒ¼ã‚«ãƒ«ãƒžã‚·ãƒ³ä¸Šã§åˆ†æ•£ãƒ†ã‚¹ãƒˆã‚’行ã†ã¨ãã€å…¨ã¦ã®ä¸€æ™‚データ㌠basetemp ディレクトリã®é…下ã§å®Ÿè¡Œã•れã¦ãƒ†ã‚¹ãƒˆæ¯Žã«ä¸€æ„ã«ãªã‚‹ã‚ˆã†ã€ã‚µãƒ–プロセスã«å¯¾ã—ã¦ã‚‚ basetemp ディレクトリをã¡ã‚ƒã‚“ã¨è¨­å®šã—ã¾ã™ã€‚ .. _`py.path.local`: http://py.rtfd.org/path.html pytest-2.5.1/doc/ja/_static/0000775000175000017500000000000012254002202015211 5ustar hpkhpk00000000000000pytest-2.5.1/doc/ja/_static/sphinxdoc.css0000664000175000017500000001402212254002202017721 0ustar hpkhpk00000000000000/* * sphinxdoc.css_t * ~~~~~~~~~~~~~~~ * * Sphinx stylesheet -- sphinxdoc theme. Originally created by * Armin Ronacher for Werkzeug. * * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ @import url("basic.css"); /* -- page layout ----------------------------------------------------------- */ body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; font-size: 1.1em; letter-spacing: -0.01em; line-height: 150%; text-align: center; background-color: #BFD1D4; color: black; padding: 0; border: 1px solid #aaa; margin: 0px 80px 0px 80px; min-width: 740px; } div.document { background-color: white; text-align: left; background-image: url(contents.png); background-repeat: repeat-x; } div.bodywrapper { margin: 0 290px 0 0; border-right: 1px solid #ccc; } div.body { margin: 0; padding: 0.5em 20px 20px 20px; } div.related { font-size: 0.8em; } div.related ul { background-image: url(navigation.png); height: 2em; border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; } div.related ul li { margin: 0; padding: 0; height: 2em; float: left; } div.related ul li.right { float: right; margin-right: 5px; } div.related ul li a { margin: 0; padding: 0 5px 0 5px; line-height: 1.75em; color: #EE9816; } div.related ul li a:hover { color: #3CA8E7; } div.sphinxsidebarwrapper { padding: 0; } div.sphinxsidebar { margin: 0; padding: 0.5em 15px 15px 0; width: 260px; float: right; font-size: 1em; text-align: left; } div.sphinxsidebar h3, div.sphinxsidebar h4 { margin: 1em 0 0.5em 0; font-size: 1em; padding: 0.1em 0 0.1em 0.5em; color: white; border: 1px solid #86989B; background-color: #AFC1C4; } div.sphinxsidebar h3 a { color: white; } div.sphinxsidebar ul { padding-left: 1.5em; margin-top: 7px; padding: 0; line-height: 130%; } div.sphinxsidebar ul ul { margin-left: 20px; } div.sphinxsidebar #searchbox input[type="submit"] { width: 55px; } div.footer { background-color: #E3EFF1; color: #86989B; padding: 3px 8px 3px 0; clear: both; font-size: 0.8em; text-align: right; } div.footer a { color: #86989B; text-decoration: underline; } /* -- body styles ----------------------------------------------------------- */ p { margin: 0.8em 0 0.5em 0; 
} a { color: #CA7900; text-decoration: none; } a:hover { color: #2491CF; } div.body a { text-decoration: underline; } h1 { margin: 0; padding: 0.7em 0 0.3em 0; font-size: 1.5em; color: #11557C; } h2 { margin: 1.3em 0 0.2em 0; font-size: 1.35em; padding: 0; } h3 { margin: 1em 0 -0.3em 0; font-size: 1.2em; } div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a { color: black!important; } h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { display: none; margin: 0 0 0 0.3em; padding: 0 0.2em 0 0.2em; color: #aaa!important; } h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, h5:hover a.anchor, h6:hover a.anchor { display: inline; } h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, h5 a.anchor:hover, h6 a.anchor:hover { color: #777; background-color: #eee; } a.headerlink { color: #c60f0f!important; font-size: 1em; margin-left: 6px; padding: 0 4px 0 4px; text-decoration: none!important; } a.headerlink:hover { background-color: #ccc; color: white!important; } cite, code, tt { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.01em; } tt { background-color: #f2f2f2; border-bottom: 1px solid #ddd; color: #333; } tt.descname, tt.descclassname, tt.xref { border: 0; } hr { border: 1px solid #abc; margin: 2em; } a tt { border: 0; color: #CA7900; } a tt:hover { color: #2491CF; } pre { font-family: 'Consolas', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; font-size: 0.95em; letter-spacing: 0.015em; line-height: 120%; padding: 0.5em; border: 1px solid #ccc; background-color: #f8f8f8; } pre a { color: inherit; text-decoration: underline; } td.linenos pre { padding: 0.5em 0; } div.quotebar { background-color: #f8f8f8; max-width: 250px; float: right; padding: 2px 7px; border: 1px solid #ccc; } div.topic { background-color: #f8f8f8; } table { border-collapse: collapse; margin: 0 -0.5em 0 
-0.5em; } table td, table th { padding: 0.2em 0.5em 0.2em 0.5em; } div.admonition, div.warning { font-size: 0.9em; margin: 1em 0 1em 0; border: 1px solid #86989B; background-color: #f7f7f7; padding: 0; } div.admonition p, div.warning p { margin: 0.5em 1em 0.5em 1em; padding: 0; } div.admonition pre, div.warning pre { margin: 0.4em 1em 0.4em 1em; } div.admonition p.admonition-title, div.warning p.admonition-title { margin: 0; padding: 0.1em 0 0.1em 0.5em; color: white; border-bottom: 1px solid #86989B; font-weight: bold; background-color: #AFC1C4; } div.warning { border: 1px solid #940000; } div.warning p.admonition-title { background-color: #CF0000; border-bottom-color: #940000; } div.admonition ul, div.admonition ol, div.warning ul, div.warning ol { margin: 0.1em 0.5em 0.5em 3em; padding: 0; } div.versioninfo { margin: 1em 0 0 0; border: 1px solid #ccc; background-color: #DDEAF0; padding: 8px; line-height: 1.3em; font-size: 0.9em; } .viewcode-back { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', sans-serif; } div.viewcode-block:target { background-color: #f4debf; border-top: 1px solid #ac9; border-bottom: 1px solid #ac9; } pytest-2.5.1/doc/ja/customize.txt0000664000175000017500000001754312254002202016360 0ustar hpkhpk00000000000000.. Basic test configuration =================================== 基本的ãªãƒ†ã‚¹ãƒˆã®è¨­å®š ==================== .. Command line options and configuration file settings ----------------------------------------------------------------- ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ã‚·ãƒ§ãƒ³ã¨æ§‹æˆãƒ•ァイルã®è¨­å®š -------------------------------------------- .. You can get help on command line options and values in INI-style configurations files by using the general help option:: 一般的ãªãƒ˜ãƒ«ãƒ—オプションを使ã£ã¦ã€ini ã‚¹ã‚¿ã‚¤ãƒ«ã®æ§‹æˆãƒ•ァイルã®ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションã¨ãã®å€¤ã®ãƒ˜ãƒ«ãƒ—を確èªã§ãã¾ã™:: py.test -h # ã‚ªãƒ—ã‚·ãƒ§ãƒ³ã¨æ§‹æˆãƒ•ァイルã®è¨­å®šã‚’表示 .. 
This will display command line and configuration file settings which were registered by installed plugins. ã“れã¯ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«æ¸ˆã¿ã®ãƒ—ラグインãŒç™»éŒ²ã—ãŸã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³è¨­å®šã¨æ§‹æˆãƒ•ァイル設定も表示ã—ã¾ã™ã€‚ .. How test configuration is read from configuration INI-files ------------------------------------------------------------- INI æ§‹æˆãƒ•ァイルã‹ã‚‰ãƒ†ã‚¹ãƒˆè¨­å®šã®èª­ã¿è¾¼ã¿æ–¹æ³• -------------------------------------------- .. py.test searches for the first matching ini-style configuration file in the directories of command line argument and the directories above. It looks for file basenames in this order:: py.test ã¯ã€ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³å¼•æ•°ã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã¨ãã®ä¸Šä½ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã®ã€æœ€åˆã«ä¸€è‡´ã™ã‚‹ ini ã‚¹ã‚¿ã‚¤ãƒ«ã®æ§‹æˆãƒ•ァイルを検索ã—ã¾ã™ã€‚次ã®é †ç•ªã§ãƒ•ァイルåを見ã¦ã„ãã¾ã™:: pytest.ini tox.ini setup.cfg .. Searching stops when the first ``[pytest]`` section is found. There is no merging of configuration values from multiple files. Example:: 最åˆã® ``[pytest]`` ã®ã‚»ã‚¯ã‚·ãƒ§ãƒ³ã‚’見ã¤ã‘ãŸã¨ãã«æ¤œç´¢ã‚’中止ã—ã¾ã™ã€‚複数ã®è¨­å®šãƒ•ァイルã‹ã‚‰è¨­å®šå€¤ã‚’マージã™ã‚‹ã‚ˆã†ãªã“ã¨ã¯ã—ã¾ã›ã‚“。サンプルを紹介ã—ã¾ã™:: py.test path/to/testdir 次ã®ã‚ˆã†ã«æ§‹æˆãƒ•ァイルをå«ã‚€ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªãŒã‚りã¾ã™:: path/to/testdir/pytest.ini path/to/testdir/tox.ini path/to/testdir/setup.cfg path/to/pytest.ini path/to/tox.ini path/to/setup.cfg ... # ファイルシステムã®ãƒ«ãƒ¼ãƒˆã¾ã§ä¸Šã‚‹ .. If argument is provided to a py.test run, the current working directory is used to start the search. 引数㌠py.test を実行ã™ã‚‹ãŸã‚ã«æä¾›ã•れるもã®ãªã‚‰ã€ã‚«ãƒ¬ãƒ³ãƒˆãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªãŒãã®æ¤œç´¢ã®é–‹å§‹ä½ç½®ã«ä½¿ã‚れã¾ã™ã€‚ .. _`how to change command line options defaults`: .. _`adding default options`: コマンドラインオプションã®ãƒ‡ãƒ•ォルト値ã®å¤‰æ›´æ–¹æ³• ------------------------------------------------ .. How to change command line options defaults ------------------------------------------------ .. It can be tedious to type the same series of command line options every time you use py.test . 
For example, if you always want to see detailed info on skipped and xfailed tests, as well as have terser "dot" progress output, you can write it into a configuration file:: py.test を使ã†ã¨ãã«æ¯Žå›žä¸€é€£ã®ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã‚ªãƒ—ションを入力ã™ã‚‹ã®ã¯é£½ã飽ãã—ã¦ãã¾ã™ã€‚例ãˆã°ã€æ¯Žå›žã‚¹ã‚­ãƒƒãƒ—ã—ãŸã‚Š xfail ã—ãŸãƒ†ã‚¹ãƒˆã®è©³ç´°æƒ…報を見ãŸã„ãªã‚‰ã€é€²æ—状æ³ã‚’簡潔㪠"ドット" 出力ã«ã™ã‚‹ã®ã¨åŒæ§˜ã«ã€æ§‹æˆãƒ•ァイル内ã«ãã®è¨­å®šã‚’記述ã§ãã¾ã™:: # pytest.ini ã®å†…容 # (ã¾ãŸã¯ tox.ini ã‹ setup.cfg) [pytest] addopts = -rsxX -q .. From now on, running ``py.test`` will add the specified options. 設定後ã«å®Ÿè¡Œã™ã‚‹ã¨ ``py.test`` ã¯æŒ‡å®šã—ãŸã‚ªãƒ—ションを追加ã—ã¾ã™ã€‚ .. Builtin configuration file options ---------------------------------------------- 組ã¿è¾¼ã¿ã®æ§‹æˆãƒ•ァイルオプション --------------------------------- .. confval:: minversion .. Specifies a minimal pytest version required for running tests. minversion = 2.1 # will fail if we run with pytest-2.0 テストã®å®Ÿè¡Œã«å¿…è¦ãª pytest ã®æœ€å°ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’指定ã—ã¾ã™ minversion = 2.1 # pytest-2.0 ã§å®Ÿè¡Œã™ã‚‹ã¨å¤±æ•—ã™ã‚‹ .. confval:: addopts .. Add the specified ``OPTS`` to the set of command line arguments as if they had been specified by the user. Example: if you have this ini file content:: [pytest] addopts = --maxfail=2 -rf # exit after 2 failures, report fail info issuing ``py.test test_hello.py`` actually means:: py.test --maxfail=2 -rf test_hello.py Default is to add no options. ãƒ¦ãƒ¼ã‚¶ãƒ¼ãŒæŒ‡å®šã™ã‚‹ã‚ˆã†ã«ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³å¼•数をセットã™ã‚‹ã®ã«ç‰¹åŒ–ã—㟠``OPTS`` を追加ã—ã¾ã™ã€‚次ã®ã‚ˆã†ãª ini ファイルãŒã‚ã‚‹å ´åˆ:: [pytest] addopts = --maxfail=2 -rf # 2回失敗ã—ãŸã‚‰çµ‚了ã—ã¦ã€ãã®å†…容をレãƒãƒ¼ãƒˆã™ã‚‹ ``py.test test_hello.py`` ã¯ã€å®Ÿéš›ã«ã¯æ¬¡ã®å†…容ã¨åŒã˜ã§ã™:: py.test --maxfail=2 -rf test_hello.py デフォルトã§ã¯ä½•ã®ã‚ªãƒ—ションを追加ã—ã¾ã›ã‚“。 .. confval:: norecursedirs .. Set the directory basename patterns to avoid when recursing for test discovery. The individual (fnmatch-style) patterns are applied to the basename of a directory to decide if to recurse into it. 
Pattern matching characters:: * matches everything ? matches any single character [seq] matches any character in seq [!seq] matches any char not in seq Default patterns are ``.* _* CVS {args}``. Setting a ``norecursedir`` replaces the default. Here is an example of how to avoid certain directories:: # content of setup.cfg [pytest] norecursedirs = .svn _build tmp* å†å¸°çš„ã«æŽ¢ç´¢ã—ãªã„テストディレクトリã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªåã®ãƒ‘ターンを設定ã—ã¾ã™ã€‚ãれãžã‚Œ (fnmatch スタイル) ã®ãƒ‘ターンãŒãã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªå†…ã‚’å†å¸°çš„ã«èª¿ã¹ã‚‹ã‹ã‚’決ã‚ã‚‹ã®ã«ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªåã«é©ç”¨ã•れã¾ã™ã€‚パターンマッãƒãƒ³ã‚°æ–‡å­—ã¯æ¬¡ã®é€šã‚Šã§ã™:: * å…¨ã¦ã«ä¸€è‡´ã™ã‚‹ ? ä»»æ„ã®1文字ã«ä¸€è‡´ã™ã‚‹ [seq] seq ã®ã†ã¡ä»»æ„ã®1文字ã«ä¸€è‡´ã™ã‚‹ [!seq] seq ã®ã©ã®æ–‡å­—ã«ã‚‚一致ã—ãªã„ デフォルトパターン㯠``.* _* CVS {args}`` ã¨ãªã£ã¦ãŠã‚Šã€ ``norecursedir`` を設定ã™ã‚‹ã“ã¨ã§ç½®ãæ›ãˆã‚‰ã‚Œã¾ã™ã€‚特定ã®ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’探索ã—ãªã„方法ã®ã‚µãƒ³ãƒ—ãƒ«ã¯æ¬¡ã®é€šã‚Šã§ã™:: # setup.cfg ã®å†…容 [pytest] norecursedirs = .svn _build tmp* ã“れã¯å…¸åž‹çš„㪠subversion 㨠sphinx ã® build ディレクトリ㨠``tmp`` ã¨ã„ã†æŽ¥é ­è¾žã‚’ã‚‚ã¤ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’å†å¸°æŽ¢ç´¢ã—ãªã„設定ã§ã™ã€‚ .. confval:: python_files .. One or more Glob-style file patterns determining which python files are considered as test modules. python ファイルをテストモジュールã¨ã¿ãªã™ã€1ã¤ã‹ãれ以上㮠Glob スタイルã®ãƒ•ァイルパターンã§ã™ã€‚ .. confval:: python_classes .. One or more name prefixes determining which test classes are considered as test modules. テストクラスをテストモジュールã¨ã¿ãªã™ã€1ã¤ã‹ãã‚Œä»¥ä¸Šã®æŽ¥é ­è¾žã§ã™ã€‚ .. confval:: python_functions .. One or more name prefixes determining which test functions and methods are considered as test modules. See :ref:`change naming conventions` for examples. テスト関数やメソッドをテストモジュールã¨ã¿ãªã™ã€1ã¤ã‹ãã‚Œä»¥ä¸Šã®æŽ¥é ­è¾žã§ã™ã€‚ :ref:`change naming conventions` ã®ã‚µãƒ³ãƒ—ルもã”覧ãã ã•ã„。 pytest-2.5.1/doc/ja/assert.txt0000664000175000017500000004020312254002202015624 0ustar hpkhpk00000000000000 .. 
The writing and reporting of assertions in tests ================================================== テストã®ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ã«ãŠã‘る書ãè¾¼ã¿ã¨ãƒ¬ãƒãƒ¼ãƒˆ ============================================== .. _`assert with the assert statement`: ``assert`` æ–‡ã«ã‚ˆã‚‹ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ ------------------------------- .. Asserting with the ``assert`` statement --------------------------------------------------------- .. ``py.test`` allows you to use the standard python ``assert`` for verifying expectations and values in Python tests. For example, you can write the following:: ``py.test`` ã¯ã€ãƒ†ã‚¹ãƒˆã§æœŸå¾…値ã¨å®Ÿéš›ã®å€¤ã‚’検証ã™ã‚‹ã®ã« Python 標準㮠``assert`` æ–‡ãŒä½¿ãˆã¾ã™ã€‚例ãˆã°ã€æ¬¡ã®ã‚ˆã†ã«ãƒ†ã‚¹ãƒˆã‚’作æˆã—ã¾ã™:: # test_assert1.py ã®å†…容 def f(): return 3 def test_function(): assert f() == 4 .. to assert that your function returns a certain value. If this assertion fails you will see the return value of the function call:: ã“ã®ã‚µãƒ³ãƒ—ルã¯ã€é–¢æ•°ãŒç‰¹å®šã®å€¤ã‚’è¿”ã™ã®ã‚’アサートã—ã¾ã™ã€‚ã“ã®ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ãŒå¤±æ•—ã—ãŸå ´åˆã€é–¢æ•°å‘¼ã³å‡ºã—ã®è¿”り値ãŒè¡¨ç¤ºã•れã¾ã™:: $ py.test test_assert1.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... collected 1 items test_assert1.py F ================================= FAILURES ================================= ______________________________ test_function _______________________________ def test_function(): > assert f() == 4 E assert 3 == 4 E + where 3 = f() test_assert1.py:5: AssertionError ========================= 1 failed in 0.01 seconds ========================= .. py.test has support for showing the values of the most common subexpressions including calls, attributes, comparisons, and binary and unary operators. (See :ref:`tbreportdemo`). This allows you to use the idiomatic python constructs without boilerplate code while not losing introspection information. 
py.test ã¯ã€é–¢æ•°å‘¼ã³å‡ºã—ã€å±žæ€§ã€æ¯”較ã€ãƒã‚¤ãƒŠãƒªã‚„å˜é …演算å­ã¨ã„ã£ãŸå‡¦ç†ã‚’å«ã‚€é€šå¸¸ã®éƒ¨åˆ†å¼ã®å€¤ã‚’表示ã™ã‚‹æ©Ÿèƒ½ãŒã‚りã¾ã™ (:ref:`tbreportdemo` ã‚’å‚ç…§) 。ã“ã®æ©Ÿèƒ½ã«ã‚ˆã‚Šã€å®šåž‹çš„ãªã‚³ãƒ¼ãƒ‰ã‚’å¿…è¦ã¨ã›ãšã€Python ã‚¤ãƒ‡ã‚£ã‚ªãƒ çš„ãªæ¦‚念も利用ã§ãã¾ã™ã€‚ãã®ä¸Šã§ã‚¤ãƒ³ãƒˆãƒ­ã‚¹ãƒšã‚¯ã‚·ãƒ§ãƒ³æƒ…報を失ã†ã“ã¨ã‚‚ã‚りã¾ã›ã‚“。 .. However, if you specify a message with the assertion like this:: 但ã—ã€æ¬¡ã®ã‚ˆã†ã«ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ã¨ä¸€ç·’ã«ãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ã‚’指定ã—ãŸå ´åˆ:: assert a % 2 == 0, "value was odd, should be even" .. then no assertion introspection takes places at all and the message will be simply shown in the traceback. ãã“ã§ã‚¢ã‚µãƒ¼ãƒˆã‚¤ãƒ³ãƒˆãƒ­ã‚¹ãƒšã‚¯ã‚·ãƒ§ãƒ³ã‚’行ã‚ãšã€ã“ã®ãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ã¯å˜ç´”ã«ãƒˆãƒ¬ãƒ¼ã‚¹ãƒãƒƒã‚¯ã§è¡¨ç¤ºã•れã¾ã™ã€‚ .. See :ref:`assert-details` for more information on assertion introspection. アサートイントロスペクションã®è©³ç´°ã«ã¤ã„ã¦ã¯ :ref:`assert-details` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. Assertions about expected exceptions ------------------------------------------ 例外発生を期待ã™ã‚‹ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ ------------------------------ .. In order to write assertions about raised exceptions, you can use ``pytest.raises`` as a context manager like this:: 発生ã—ãŸä¾‹å¤–ã®ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ã‚’行ã†ã«ã¯ã€æ¬¡ã®ã‚ˆã†ã«ã‚³ãƒ³ãƒ†ã‚­ã‚¹ãƒˆ マãƒãƒ¼ã‚¸ãƒ£ãƒ¼ã¨ã—㦠``pytest.raises`` を使ã„ã¾ã™:: import pytest with pytest.raises(ZeroDivisionError): 1 / 0 .. and if you need to have access to the actual exception info you may use:: ã‚‚ã—実際ã®ä¾‹å¤–ã®æƒ…報を調ã¹ã‚‹å¿…è¦ãŒã‚ã‚‹ãªã‚‰ã€æ¬¡ã®ã‚ˆã†ã«è¡Œã„ã¾ã™:: with pytest.raises(RuntimeError) as excinfo: def f(): f() f() # excinfo.type, excinfo.value, excinfo.traceback ã¨ã„ã£ãŸé–¢é€£ã™ã‚‹å€¤ã‚’確èªã™ã‚‹ .. 
If you want to write test code that works on Python 2.4 as well, you may also use two other ways to test for an expected exception:: Python 2.4 ã§ã‚‚åŒã˜ã‚ˆã†ã«å‹•作ã™ã‚‹ãƒ†ã‚¹ãƒˆã‚³ãƒ¼ãƒ‰ã‚’書ããŸã„ãªã‚‰ã€ä¾‹å¤–発生を期待ã™ã‚‹ãƒ†ã‚¹ãƒˆã‚’行ã†åˆ¥ã®æ–¹æ³•ãŒ2ã¤ã‚りã¾ã™:: pytest.raises(ExpectedException, func, *args, **kwargs) pytest.raises(ExpectedException, "func(*args, **kwargs)") .. both of which execute the specified function with args and kwargs and asserts that the given ``ExpectedException`` is raised. The reporter will provide you with helpful output in case of failures such as *no exception* or *wrong exception*. 両方ã¨ã‚‚指定ã—ãŸé–¢æ•°ã¸ args 㨠kwargs を渡ã—ã¦å®Ÿè¡Œã—ã€å¼•æ•°ã¨ã—ã¦ä¸ŽãˆãŸ ``ExpectedException`` ãŒç™ºç”Ÿã™ã‚‹ã“ã¨ã‚’アサートã—ã¾ã™ã€‚ã“ã®ãƒ¬ãƒãƒ¼ãƒˆã¯ *no exception* ã¾ãŸã¯ *wrong exception* ã¨ã„ã£ãŸãƒ†ã‚¹ãƒˆã«å¤±æ•—ã—ãŸã¨ãã«åˆ†ã‹ã‚Šã‚„ã™ã„内容を表示ã—ã¾ã™ã€‚ .. _newreport: コンテキストã«ä¾å­˜ã—ãŸå†…å®¹ã®æ¯”較 -------------------------------- .. Making use of context-sensitive comparisons ------------------------------------------------- .. versionadded:: 2.0 .. py.test has rich support for providing context-sensitive information when it encounters comparisons. For example:: py.test ã¯ã€æ¯”較ã™ã‚‹ã¨ãã«ã‚³ãƒ³ãƒ†ã‚­ã‚¹ãƒˆä¾å­˜ã®æƒ…報を分ã‹ã‚Šã‚„ã™ã表示ã—ã¾ã™ã€‚例ãˆã°ã€:: # test_assert2.py ã®å†…容 def test_set_comparison(): set1 = set("1308") set2 = set("8035") assert set1 == set2 .. if you run this module:: ã“ã®ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’実行ã™ã‚‹ã¨:: $ py.test test_assert2.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... 
collected 1 items test_assert2.py F ================================= FAILURES ================================= ___________________________ test_set_comparison ____________________________ def test_set_comparison(): set1 = set("1308") set2 = set("8035") > assert set1 == set2 E assert set(['0', '1', '3', '8']) == set(['0', '3', '5', '8']) E Extra items in the left set: E '1' E Extra items in the right set: E '5' test_assert2.py:5: AssertionError ========================= 1 failed in 0.01 seconds ========================= .. Special comparisons are done for a number of cases: 複数ã®ã‚±ãƒ¼ã‚¹ã«ãŠã„ã¦ã€ç‰¹åˆ¥ãªæ¯”較ãŒè¡Œã‚れã¾ã™: .. * comparing long strings: a context diff is shown * comparing long sequences: first failing indices * comparing dicts: different entries * é•·ã„æ–‡å­—åˆ—ã®æ¯”較: コンテキスト diff を表示 * é•·ã„ã‚·ãƒ¼ã‚±ãƒ³ã‚¹ã®æ¯”較: 最åˆã«å¤±æ•—ã—ãŸã‚¤ãƒ³ãƒ‡ãƒƒã‚¯ã‚¹ * ãƒ‡ã‚£ã‚¯ã‚·ãƒ§ãƒŠãƒªã®æ¯”較: ç•°ãªã‚‹ã‚¨ãƒ³ãƒˆãƒª .. See the :ref:`reporting demo ` for many more examples. より多ãã®ã‚µãƒ³ãƒ—ルã«ã¤ã„ã¦ã¯ :ref:`レãƒãƒ¼ãƒˆã®ãƒ‡ãƒ¢ ` å‚ç…§ã—ã¦ãã ã•ã„。 .. Defining your own assertion comparison ---------------------------------------------- アサーション比較ã®å®šç¾© ---------------------- .. It is possible to add your own detailed explanations by implementing the ``pytest_assertrepr_compare`` hook. ``pytest_assertrepr_compare`` フックを実装ã™ã‚‹ã“ã¨ã§ç‹¬è‡ªã®è©³ç´°èª¬æ˜Žã‚’追加ã§ãã¾ã™ã€‚ .. autofunction:: _pytest.hookspec.pytest_assertrepr_compare .. As an example consider adding the following hook in a conftest.py which provides an alternative explanation for ``Foo`` objects:: 例ã¨ã—ã¦ã€conftest.py ã«æ¬¡ã®ãƒ•ックを追加ã—ã¦ã¿ã¾ã™ã€‚ã“れ㯠``Foo`` オブジェクトã®åˆ¥ã®èª¬æ˜Žã‚’æä¾›ã—ã¾ã™:: # conftest.py ã®å†…容 from test_foocompare import Foo def pytest_assertrepr_compare(op, left, right): if isinstance(left, Foo) and isinstance(right, Foo) and op == "==": return ['Comparing Foo instances:', ' vals: %s != %s' % (left.val, right.val)] .. 
now, given this test module:: ã“ã“ã§æ¬¡ã®ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ãŒã‚りã¾ã™:: # test_foocompare.py ã®å†…容 class Foo: def __init__(self, val): self.val = val def test_compare(): f1 = Foo(1) f2 = Foo(2) assert f1 == f2 .. you can run the test module and get the custom output defined in the conftest file:: ã“ã®ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’実行ã™ã‚‹ã¨ã€conftest ファイルã§å®šç¾©ã—ãŸç‹¬è‡ªã®å‡ºåЛ内容ãŒè¡¨ç¤ºã•れã¾ã™:: $ py.test -q test_foocompare.py collecting ... collected 1 items F ================================= FAILURES ================================= _______________________________ test_compare _______________________________ def test_compare(): f1 = Foo(1) f2 = Foo(2) > assert f1 == f2 E assert Comparing Foo instances: E vals: 1 != 2 test_foocompare.py:8: AssertionError 1 failed in 0.01 seconds .. _assert-details: .. _`assert introspection`: 高度ãªã‚¢ã‚µãƒ¼ãƒˆã‚¤ãƒ³ãƒˆãƒ­ã‚¹ãƒšã‚¯ã‚·ãƒ§ãƒ³ ---------------------------------- .. Advanced assertion introspection ---------------------------------- .. versionadded:: 2.1 .. Reporting details about a failing assertion is achieved either by rewriting assert statements before they are run or re-evaluating the assert expression and recording the intermediate values. Which technique is used depends on the location of the assert, py.test's configuration, and Python version being used to run py.test. Note that for assert statements with a manually provided message, i.e. ``assert expr, message``, no assertion introspection takes place and the manually provided message will be rendered in tracebacks. 
失敗ã™ã‚‹ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ã«é–¢ã™ã‚‹è©³ç´°ã®ãƒ¬ãƒãƒ¼ãƒˆã¯ã€å®Ÿè¡Œå‰ã« assert æ–‡ã‚’æ›¸ãæ›ãˆã‚‹ã‹ã€ã¾ãŸã¯ assert å¼ã‚’å†è©•価ã—ã¦ä¸­é–“値を記録ã™ã‚‹ã‹ã®ã©ã¡ã‚‰ã‹ã®æ–¹æ³•ã§è¡Œã‚れã¾ã™ã€‚ã©ã¡ã‚‰ã®æ–¹æ³•を使ã†ã‹ã¯ assert ã®ä½ç½®ã€pytest ã®è¨­å®šã€pytest を実行ã™ã‚‹ã®ã«ä½¿ã‚れる Python ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã«ä¾å­˜ã—ã¾ã™ã€‚ ``assert expr, message`` ã®ã‚ˆã†ã«ç›´æŽ¥ã‚³ãƒ¼ãƒ‰å†…ã§ãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ã‚’記述ã—㟠assert æ–‡ã¯ã€ã‚¢ã‚µãƒ¼ãƒˆã‚¤ãƒ³ãƒˆãƒ­ã‚¹ãƒšã‚¯ã‚·ãƒ§ãƒ³ãŒè¡Œã‚れãšã€æŒ‡å®šã—ãŸãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ãŒãƒˆãƒ¬ãƒ¼ã‚¹ãƒãƒƒã‚¯ã«è¡¨ç¤ºã•れるã“ã¨ã«æ³¨æ„ã—ã¦ãã ã•ã„。 .. By default, if the Python version is greater than or equal to 2.6, py.test rewrites assert statements in test modules. Rewritten assert statements put introspection information into the assertion failure message. py.test only rewrites test modules directly discovered by its test collection process, so asserts in supporting modules which are not themselves test modules will not be rewritten. デフォルトã§ã¯ã€Python ãƒãƒ¼ã‚¸ãƒ§ãƒ³ãŒ 2.6 以上ã®å ´åˆã€py.test ã¯ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã® assert æ–‡ã‚’æ›¸ãæ›ãˆã¾ã™ã€‚æ›¸ãæ›ãˆã‚‰ã‚ŒãŸ assert æ–‡ã¯ã€ã‚¤ãƒ³ãƒˆãƒ­ã‚¹ãƒšã‚¯ã‚·ãƒ§ãƒ³æƒ…報をアサーションã®å¤±æ•—メッセージã«è¿½åŠ ã—ã¾ã™ã€‚py.test ã¯ã€ãƒ†ã‚¹ãƒˆã‚³ãƒ¬ã‚¯ã‚·ãƒ§ãƒ³å‡¦ç†ã§æ¤œå‡ºã—ãŸãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã®ã¿ã‚’ç›´æŽ¥æ›¸ãæ›ãˆã¾ã™ã€‚ãã®ãŸã‚ã€ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã§ã¯ãªã„サãƒãƒ¼ãƒˆãƒ©ã‚¤ãƒ–ラリ㮠assert æ–‡ã¯æ›¸ãæ›ãˆã‚‰ã‚Œã¾ã›ã‚“。 .. note:: .. py.test rewrites test modules on import. It does this by using an import hook to write a new pyc files. Most of the time this works transparently. However, if you are messing with import yourself, the import hook may interfere. If this is the case, simply use ``--assert=reinterp`` or ``--assert=plain``. Additionally, rewriting will fail silently if it cannot write new pycs, i.e. in a read-only filesystem or a zipfile. 
py.test ã¯ã€ã‚¤ãƒ³ãƒãƒ¼ãƒˆæ™‚ã«ãƒ†ã‚¹ãƒˆãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã‚’æ›¸ãæ›ãˆã¾ã™ã€‚æ–°ãŸã« pyc ファイルを書ã込むãŸã‚ã«ã‚¤ãƒ³ãƒãƒ¼ãƒˆãƒ•ックを使ã†ã“ã¨ã§ã“ã®å‡¦ç†ã‚’行ã„ã¾ã™ã€‚ã“ã®å‡¦ç†ã¯ã»ã¨ã‚“ã©é€éŽçš„ã«è¡Œã‚れã¾ã™ã€‚但ã—ã€è‡ªåˆ†ã§ã‚¤ãƒ³ãƒãƒ¼ãƒˆã‚’行ã£ã¦ã”ã¡ã‚ƒã”ã¡ã‚ƒã«ãªã£ã¦ã„ã‚‹å ´åˆã€ãã®ã‚¤ãƒ³ãƒãƒ¼ãƒˆãƒ•ックãŒã‚¤ãƒ³ã‚¿ãƒ¼ãƒ•ェースã«ãªã‚‹å¯èƒ½æ€§ãŒã‚りã¾ã™ã€‚ã“ã®ã‚ˆã†ãªã‚±ãƒ¼ã‚¹ã§ã¯ã€å˜ç´”ã« ``--assert=reinterp`` ã‹ ``--assert=plain`` を使ã£ã¦ãã ã•ã„。ã•らã«ã€æ–°ãŸã« pyc ファイルを書ãè¾¼ã‚ãªã„å ´åˆã€æ›¸ãæ›ãˆã¯ã‚µã‚¤ãƒ¬ãƒ³ãƒˆãƒ¢ãƒ¼ãƒ‰ã§å¤±æ•—ã—ã¾ã™ã€‚例ãˆã°ã€èª­ã¿è¾¼ã¿å°‚用ファイルシステムや zip ファイルã§è¡Œã†ã‚ˆã†ãªã¨ãã§ã™ã€‚ .. If an assert statement has not been rewritten or the Python version is less than 2.6, py.test falls back on assert reinterpretation. In assert reinterpretation, py.test walks the frame of the function containing the assert statement to discover sub-expression results of the failing assert statement. You can force py.test to always use assertion reinterpretation by passing the ``--assert=reinterp`` option. assert æ–‡ãŒæ›¸ãæ›ãˆã‚‰ã‚Œãªã„ã€ã¾ãŸã¯ Python ãƒãƒ¼ã‚¸ãƒ§ãƒ³ 2.6 よりもå°ã•ã„å ´åˆã€py.test ã¯ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ã®å†è§£é‡ˆã‚’行ã„ã¾ã™ã€‚アサーションã®å†è§£é‡ˆã§ã¯ã€py.test ãŒã€assert æ–‡ã®å¤±æ•—ã™ã‚‹éƒ¨åˆ†å¼ã‚’見ã¤ã‘ã‚‹ãŸã‚ã« assert 文をå«ã‚€é–¢æ•°ã®ãƒ•レームを辿りã¾ã™ã€‚py.test ã«ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ã®å†è§£é‡ˆã‚’行ã†ã‚ˆã†å¼·åˆ¶ã™ã‚‹ã«ã¯ ``--assert=reinterp`` オプションを指定ã—ã¾ã™ã€‚ .. Assert reinterpretation has a caveat not present with assert rewriting: If evaluating the assert expression has side effects you may get a warning that the intermediate values could not be determined safely. A common example of this issue is an assertion which reads from a file:: アサーションã®å†è§£é‡ˆã¯ã€assert æ–‡ã®æ›¸ãæ›ãˆã‚’行ã‚ãªã„ã“ã¨ã®æ³¨æ„ãŒå¿…è¦ã§ã™: ãれ㯠assert å¼ã®è©•価ãŒå‰¯ä½œç”¨ã‚’ã‚‚ã¤å ´åˆã€ä¸­é–“値ãŒå®‰å…¨ã«æ±ºå®šã—ãªã„ã¨ã„ã†è­¦å‘Šã‚’å—ã‘å–ã‚‹ã‹ã‚‚ã—れã¾ã›ã‚“。ã“ã®å•題ã®ä¸€èˆ¬çš„ãªä¾‹ã¨ã—ã¦ã€ãƒ•ァイルを読ã¿è¾¼ã‚€ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ãŒã‚りã¾ã™:: assert f.read() != '...' .. If this assertion fails then the re-evaluation will probably succeed! 
This is because ``f.read()`` will return an empty string when it is called the second time during the re-evaluation. However, it is easy to rewrite the assertion and avoid any trouble:: ã“ã®ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ãŒå¤±æ•—ã—ãŸå ´åˆã€ãã®å†è©•価ã¯ãŠãã‚‰ãæˆåŠŸã—ã¾ã™ï¼ã¤ã¾ã‚Šå†è©•価ã«ãŠã„ã¦2回目ã«å‘¼ã³å‡ºã•れãŸã¨ãã« ``f.read()`` ãŒç©ºã®æ–‡å­—列を返ã™ã‹ã‚‰ã§ã™ã€‚ã¨ã¯ã„ãˆã€ã“ã®ã‚¢ã‚µãƒ¼ã‚·ãƒ§ãƒ³ã‚’æ›¸ãæ›ãˆã¦ã€ãã†ã„ã£ãŸãƒˆãƒ©ãƒ–ルをé¿ã‘ã‚‹ã®ã¯ç°¡å˜ã§ã™:: content = f.read() assert content != '...' .. All assert introspection can be turned off by passing ``--assert=plain``. å…¨ã¦ã®ã‚¢ã‚µãƒ¼ãƒˆã‚¤ãƒ³ãƒˆãƒ­ã‚¹ãƒšã‚¯ã‚·ãƒ§ãƒ³ã‚’無効ã«ã™ã‚‹ã«ã¯ ``--assert=plain`` を指定ã—ã¾ã™ã€‚ .. For further information, Benjamin Peterson wrote up `Behind the scenes of py.test's new assertion rewriting `_. 詳細ã«ã¤ã„ã¦ã¯ã€Benjamin Peterson ãŒè©³ã—ãã¾ã¨ã‚㟠`Behind the scenes of py.test's new assertion rewriting `_ ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. Add assert rewriting as an alternate introspection technique. .. versionadded:: 2.1 代替イントロスペクション手法ã¨ã—㦠assert æ›¸ãæ›ãˆæ©Ÿèƒ½ã‚’追加 .. Introduce the ``--assert`` option. Deprecate ``--no-assert`` and ``--nomagic``. .. versionchanged:: 2.1 ``--assert`` オプションを追加。 ``--no-assert`` 㨠``--nomagic`` を廃止。 pytest-2.5.1/doc/ja/skipping.txt0000664000175000017500000003306612254002202016160 0ustar hpkhpk00000000000000.. _`skip and xfail`: skip 㨠xfail: æˆåŠŸã—ãªã„テストを扱ㆠ===================================== .. Skip and xfail: dealing with tests that can not succeed ===================================================================== .. If you have test functions that cannot be run on certain platforms or that you expect to fail you can mark them accordingly or you may call helper functions during execution of setup or test functions. テスト関数ãŒã€ç‰¹å®šã®ãƒ—ラットフォームã§å®Ÿè¡Œã§ããªã„ã€ã¾ãŸã¯ãƒžãƒ¼ã‚¯ã—ãŸã‚‚ã®ãŒå¤±æ•—ã™ã‚‹ã“ã¨ã‚’期待ã™ã‚‹ã€ã¾ãŸã¯ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚„ setup 関数ã®å®Ÿè¡Œä¸­ã«ãƒ˜ãƒ«ãƒ‘ー関数を呼ã³å‡ºã™ã¨ã„ã£ãŸå ´åˆãŒã‚りã¾ã™ã€‚ .. 
A *skip* means that you expect your test to pass unless a certain configuration or condition (e.g. wrong Python interpreter, missing dependency) prevents it to run. And *xfail* means that your test can run but you expect it to fail because there is an implementation problem. *skip* ã¯ã€ç‰¹å®šã®è¨­å®šã‚„æ¡ä»¶ (誤ã£ãŸ Python インタープリターやä¾å­˜é–¢ä¿‚ã®æ¬ è½ãªã©) ãŒãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’実行ã•ã›ãªã„å ´åˆã‚’除ã‘ã°ã€ãƒ†ã‚¹ãƒˆãŒæˆåŠŸã™ã‚‹ã®ã‚’期待ã—ã¾ã™ã€‚ *xfail* ã¯ã€ãƒ†ã‚¹ãƒˆãã®ã‚‚ã®ã¯å®Ÿè¡Œã—ã¾ã™ãŒã€å®Ÿè£…上ã®å•題ã‹ã‚‰ãã®ãƒ†ã‚¹ãƒˆãŒå¤±æ•—ã™ã‚‹ã“ã¨ã‚’期待ã—ã¾ã™ã€‚ .. py.test counts and lists *skip* and *xfail* tests separately. However, detailed information about skipped/xfailed tests is not shown by default to avoid cluttering the output. You can use the ``-r`` option to see details corresponding to the "short" letters shown in the test progress:: py.test 㯠*skip* 㨠*xfail* ãƒ†ã‚¹ãƒˆã‚’åˆ¥ã€…ã«æ•°ãˆã¦ä¸€è¦§è¡¨ç¤ºã—ã¾ã™ã€‚ã—ã‹ã—ã€ã‚¹ã‚­ãƒƒãƒ—ã—ãŸï¼å¤±æ•—ã—ãŸãƒ†ã‚¹ãƒˆãƒ†ã‚¹ãƒˆã«ã¤ã„ã¦ã®è©³ç´°ãªæƒ…å ±ã¯ã€å‡ºåЛ内容ãŒã”ã¡ã‚ƒã”ã¡ã‚ƒã«ãªã‚‰ãªã„よã†ã«ãƒ‡ãƒ•ォルトã§ã¯è¡¨ç¤ºã•れã¾ã›ã‚“。テストã®é€²æ—状æ³ã‚’表示ã™ã‚‹ "短ã„" 文字ã«å¯¾å¿œã™ã‚‹è©³ç´°ã‚’見るãŸã‚ã« ``-r`` オプションãŒä½¿ãˆã¾ã™:: py.test -rxs # skips 㨠xfails ã®è£œè¶³æƒ…報を表示 .. (See :ref:`how to change command line options defaults`) (:ref:`how to change command line options defaults` ã‚’å‚ç…§) .. _skipif: スキップã™ã‚‹ãƒ†ã‚¹ãƒˆé–¢æ•°ã®ãƒžãƒ¼ã‚¯ ------------------------------ .. Marking a test function to be skipped ------------------------------------------- .. Here is an example of marking a test function to be skipped when run on a Python3 interpreter:: Python 3 インタープリターã§å®Ÿè¡Œã™ã‚‹ã¨ãã«ã‚¹ã‚­ãƒƒãƒ—ã™ã‚‹ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’マークã™ã‚‹ä¾‹ã‚’紹介ã—ã¾ã™:: import sys @pytest.mark.skipif("sys.version_info >= (3,0)") def test_function(): ... .. During test function setup the skipif condition is evaluated by calling ``eval('sys.version_info >= (3,0)', namespace)``. 
(*New in version 2.0.2*) The namespace contains all the module globals of the test function so that you can for example check for versions of a module you are using:: テスト関数㮠setup 処ç†ä¸­ã« skipif ã®æ¡ä»¶ãŒ ``eval('sys.version_info >= (3,0)', namespace)`` を呼ã³å‡ºã™ã“ã¨ã«ã‚ˆã‚Šè©•価ã•れã¾ã™ (*ãƒãƒ¼ã‚¸ãƒ§ãƒ³ 2.0.2 ã§è¿½åŠ *) 。ãã®åå‰ç©ºé–“ã¯ã€ä¾‹ãˆã°ä½¿ã£ã¦ã„るモジュールã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’ãƒã‚§ãƒƒã‚¯ã§ãるよã†ã«ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã®å…¨ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã® globals ã‚’å«ã¿ã¾ã™:: import mymodule @pytest.mark.skipif("mymodule.__version__ < '1.2'") def test_function(): ... .. The test function will not be run ("skipped") if ``mymodule`` is below the specified version. The reason for specifying the condition as a string is mainly that py.test can report a summary of skip conditions. For information on the construction of the ``namespace`` see `evaluation of skipif/xfail conditions`_. ã“ã®ãƒ†ã‚¹ãƒˆé–¢æ•°ã¯ã€ ``mymodule`` ãŒæŒ‡å®šã—ãŸãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚ˆã‚Šä½Žã„ã¨ãã«ã¯å®Ÿè¡Œã•れã¾ã›ã‚“ ("スキップã•れる") 。ã“ã†ã„ã£ãŸæ¡ä»¶ã‚’æ–‡å­—åˆ—ã§æŒ‡å®šã™ã‚‹ä¸»ãªç†ç”±ã¯ã€py.test ㌠skip æ¡ä»¶ã®æ¦‚è¦ã‚’レãƒãƒ¼ãƒˆã§ãã‚‹ã‹ã‚‰ã§ã™ã€‚ ``namespace`` ã®æ§‹ç¯‰ã«é–¢ã™ã‚‹è©³ç´°ã¯ :ref:`evaluation of skipif/xfail conditions` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。 .. You can of course create a shortcut for your conditional skip decorator at module level like this:: 次ã®ã‚ˆã†ã«ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ãƒ¬ãƒ™ãƒ«ã§æ¡ä»¶ä»˜ãã® skipif デコレーターã®ã‚·ãƒ§ãƒ¼ãƒˆã‚«ãƒƒãƒˆã‚‚作æˆã§ãã¾ã™:: win32only = pytest.mark.skipif("sys.platform != 'win32'") @win32only def test_function(): ... .. Skip all test functions of a class -------------------------------------- クラスã®å…¨ãƒ†ã‚¹ãƒˆé–¢æ•°ã®ã‚¹ã‚­ãƒƒãƒ— ------------------------------ .. As with all function :ref:`marking ` you can skip test functions at the `whole class- or module level`_. 
Here is an example for skipping all methods of a test class based on the platform:: å…¨ã¦ã®é–¢æ•°ã‚’ :ref:`マークã™ã‚‹ ` ã®ã¨åŒæ§˜ã« `クラス全体ã¾ãŸã¯ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ãƒ¬ãƒ™ãƒ«`_ ã§ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’スキップã§ãã¾ã™ã€‚プラットフォームã«ã‚ˆã‚Šãƒ†ã‚¹ãƒˆã‚¯ãƒ©ã‚¹ã®å…¨ãƒ¡ã‚½ãƒƒãƒ‰ã‚’スキップã™ã‚‹ã‚µãƒ³ãƒ—ルを紹介ã—ã¾ã™:: class TestPosixCalls: pytestmark = pytest.mark.skipif("sys.platform == 'win32'") def test_function(self): "will not be setup or run under 'win32' platform" .. The ``pytestmark`` special name tells py.test to apply it to each test function in the class. If your code targets python2.6 or above you can more naturally use the skipif decorator (and any other marker) on classes:: ``pytestmark`` ã¨ã„ã†ç‰¹åˆ¥ãªåå‰ã‚’使ã£ã¦ã€ã‚¯ãƒ©ã‚¹å†…ã®å„テスト関数ã¸ã‚»ãƒƒãƒˆã—ãŸé–¢æ•°ã‚’ pytest ã«é©ç”¨ã•ã›ã¾ã™ã€‚テストコード㌠Python 2.6 以上を想定ã—ã¦ã„ã‚‹ãªã‚‰ã€ã‚‚ã£ã¨è‡ªç„¶ã« skipif デコレーター (ãã®ä»–ã®ä»»æ„ã®ãƒžãƒ¼ã‚«ãƒ¼) をクラスã«å¯¾ã—ã¦é©ç”¨ã§ãã¾ã™:: @pytest.mark.skipif("sys.platform == 'win32'") class TestPosixCalls: def test_function(self): "will not be setup or run under 'win32' platform" .. Using multiple "skipif" decorators on a single function is generally fine - it means that if any of the conditions apply the function execution will be skipped. 1ã¤ã®é–¢æ•°ã«è¤‡æ•°ã® "skipif" デコレーターを使ã†ã“ã¨ã¯ä¸€èˆ¬çš„ã«è‰¯ã„ã“ã¨ã§ã™ã€‚ã„ãšã‚Œã‹ã®æ¡ä»¶ãŒé©ç”¨ã•れãŸå ´åˆã«ãã®ãƒ†ã‚¹ãƒˆé–¢æ•°ã®å®Ÿè¡Œã¯ã‚¹ã‚­ãƒƒãƒ—ã•れるã“ã¨ã«ãªã‚Šã¾ã™ã€‚ .. _`whole class- or module level`: mark.html#scoped-marking .. _`クラス全体ã¾ãŸã¯ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ãƒ¬ãƒ™ãƒ«`: mark.html#scoped-marking .. _xfail: 失敗を期待ã™ã‚‹ãƒ†ã‚¹ãƒˆé–¢æ•°ã®ãƒžãƒ¼ã‚¯ -------------------------------- .. Mark a test function as expected to fail ------------------------------------------------------- .. You can use the ``xfail`` marker to indicate that you expect the test to fail:: テストã®å¤±æ•—を期待ã—ã¦ã„ã‚‹ã“ã¨ã‚’表ã™ã®ã« ``xfail`` マーカーを使ã„ã¾ã™:: @pytest.mark.xfail def test_function(): ... .. This test will be run but no traceback will be reported when it fails. 
Instead terminal reporting will list it in the "expected to fail" or "unexpectedly passing" sections. ã“ã®ãƒ†ã‚¹ãƒˆã¯å®Ÿè¡Œã•れã¾ã™ãŒã€å¤±æ•—ã™ã‚‹ã¨ãã«ãƒˆãƒ¬ãƒ¼ã‚¹ãƒãƒƒã‚¯ã‚’表示ã—ã¾ã›ã‚“。ãã®ä»£ã‚りã€ã‚¿ãƒ¼ãƒŸãƒŠãƒ«ä¸Šã« "expected to fail" ã‹ "unexpectedly passing" セクションã«ãã®ä¸€è¦§ãŒè¡¨ç¤ºã•れã¾ã™ã€‚ .. By specifying on the commandline:: ã‚³ãƒžãƒ³ãƒ‰ãƒ©ã‚¤ãƒ³ã§æ¬¡ã®ã‚ˆã†ã«æŒ‡å®šã™ã‚‹ã¨:: pytest --runxfail .. you can force the running and reporting of an ``xfail`` marked test as if it weren't marked at all. ``xfail`` ã§ãƒžãƒ¼ã‚¯ã•れã¦ã„ãªã„ã‹ã®ã‚ˆã†ã« ``xfail`` ã§ãƒžãƒ¼ã‚¯ã•れãŸãƒ†ã‚¹ãƒˆé–¢æ•°ã‚’実行ã—ã¦ãƒ¬ãƒãƒ¼ãƒˆã®è¡¨ç¤ºã‚’強制ã§ãã¾ã™ã€‚ .. As with skipif_ you can also mark your expectation of a failure on a particular platform:: skipif_ ã¨åŒæ§˜ã«ã€ç‰¹å®šã®ãƒ—ラットフォームã§ã®å¤±æ•—を期待ã™ã‚‹ã‚ˆã†ã«ã‚‚マークã§ãã¾ã™:: @pytest.mark.xfail("sys.version_info >= (3,0)") def test_function(): ... .. You can furthermore prevent the running of an "xfail" test or specify a reason such as a bug ID or similar. Here is a simple test file with the several usages: ã•らã«ã€ãƒã‚° ID ã¨ã„ã£ãŸ reason を指定ã—㦠"xfail" テストを実行ã—ãªã„よã†ã«ã‚‚ã§ãã¾ã™ã€‚ä»–ã«ã‚‚使用例ã¨ç°¡å˜ãªãƒ†ã‚¹ãƒˆã‚’紹介ã—ã¾ã™: .. literalinclude:: example/xfail_demo.py .. Running it with the report-on-xfail option gives this output:: 次ã®ã‚ˆã†ã« xfail ã®ãƒ¬ãƒãƒ¼ãƒˆã‚’表示ã™ã‚‹ã‚ªãƒ—ションを指定ã—ã¦å®Ÿè¡Œã—ã¾ã™:: example $ py.test -rx xfail_demo.py =========================== test session starts ============================ platform linux2 -- Python 2.7.1 -- pytest-2.2.4 collecting ... 
collected 6 items xfail_demo.py xxxxxx ========================= short test summary info ========================== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 reason: [NOTRUN] XFAIL xfail_demo.py::test_hello3 condition: hasattr(os, 'sep') XFAIL xfail_demo.py::test_hello4 bug 110 XFAIL xfail_demo.py::test_hello5 condition: pytest.__version__[0] != "17" XFAIL xfail_demo.py::test_hello6 reason: reason ======================== 6 xfailed in 0.03 seconds ========================= .. _`evaluation of skipif/xfail conditions`: skipif/xfail å¼ã®è©•価 --------------------- .. Evaluation of skipif/xfail expressions ---------------------------------------------------- .. versionadded:: 2.0.2 .. The evaluation of a condition string in ``pytest.mark.skipif(conditionstring)`` or ``pytest.mark.xfail(conditionstring)`` takes place in a namespace dictionary which is constructed as follows: ``pytest.mark.skipif(conditionstring)`` ã¾ãŸã¯ ``pytest.mark.xfail(conditionstring)`` ã®æ¡ä»¶æ–‡å­—列ã®è©•価ã¯ã€æ¬¡ã®ã‚ˆã†ã«æ§‹ç¯‰ã•れãŸåå‰ç©ºé–“ディクショナリã®ä¸­ã§è¡Œã‚れã¾ã™: .. * the namespace is initialized by putting the ``sys`` and ``os`` modules and the pytest ``config`` object into it. * ``sys`` 㨠``os`` モジュール㨠pytest ã® ``config`` オブジェクトを加ãˆã¦åå‰ç©ºé–“ãŒåˆæœŸåŒ–ã•れる .. * updated with the module globals of the test function for which the expression is applied. * ãã®å¼ã®è©•価をé©ç”¨ã™ã‚‹ãƒ†ã‚¹ãƒˆé–¢æ•°ã®ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã® globals ãŒæ›´æ–°ã•れる .. The pytest ``config`` object allows you to skip based on a test configuration value which you might have added:: pytest ã® ``config`` オブジェクトを使ã£ã¦ã€è¿½åŠ ã—ãŸãƒ†ã‚¹ãƒˆè¨­å®šå€¤ã«ã‚ˆã‚Šã‚¹ã‚­ãƒƒãƒ—ã•ã›ã¾ã™:: @pytest.mark.skipif("not config.getvalue('db')") def test_function(...): ... .. Imperative xfail from within a test or setup function ------------------------------------------------------ テスト関数ã¾ãŸã¯ setup 関数内ã‹ã‚‰å‘½ä»¤åž‹ xfail --------------------------------------------- .. 
If you cannot declare xfail-conditions at import time you can also imperatively produce an XFail-outcome from within test or setup code. Example:: インãƒãƒ¼ãƒˆæ™‚ã« xfail ã®æ¡ä»¶ã‚’宣言ã§ããªã„å ´åˆã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã¾ãŸã¯ setup コード内ã‹ã‚‰ xfail ã™ã‚‹ã‚ˆã†ã«å‘½ä»¤çš„ã«è¨˜è¿°ã§ãã¾ã™ã€‚サンプルを紹介ã—ã¾ã™:: def test_function(): if not valid_config(): pytest.xfail("unsupported configuration") .. Skipping on a missing import dependency -------------------------------------------------- インãƒãƒ¼ãƒˆã®ä¾å­˜é–¢ä¿‚ã®æ¬ è½ã‚’スキップ ------------------------------------ .. You can use the following import helper at module level or within a test or test setup function:: モジュールレベルã€ã¾ãŸã¯ãƒ†ã‚¹ãƒˆé–¢æ•°ã‚„ setup 関数内ã‹ã‚‰æ¬¡ã®ã‚¤ãƒ³ãƒãƒ¼ãƒˆãƒ˜ãƒ«ãƒ‘ーãŒä½¿ãˆã¾ã™:: docutils = pytest.importorskip("docutils") .. If ``docutils`` cannot be imported here, this will lead to a skip outcome of the test. You can also skip based on the version number of a library:: ã‚‚ã— ``docutils`` ãŒã“ã®å ´æ‰€ã§ã‚¤ãƒ³ãƒãƒ¼ãƒˆã§ããªã„ãªã‚‰ã€ã“ã®ãƒ†ã‚¹ãƒˆã¯ã‚¹ã‚­ãƒƒãƒ—ã•れã¾ã™ã€‚ライブラリã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã«ã‚ˆã‚Šã‚¹ã‚­ãƒƒãƒ—ã•ã›ã‚‹ã“ã¨ã‚‚ã§ãã¾ã™:: docutils = pytest.importorskip("docutils", minversion="0.3") .. The version will be read from the specified module's ``__version__`` attribute. ã“ã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã¯æŒ‡å®šã—ãŸãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ã® ``__ version__`` 属性ã‹ã‚‰èª­ã¿è¾¼ã¾ã‚Œã¾ã™ã€‚ .. Imperative skip from within a test or setup function ------------------------------------------------------ テスト関数ã¾ãŸã¯ setup 関数内ã‹ã‚‰å‘½ä»¤åž‹ã‚¹ã‚­ãƒƒãƒ— ----------------------------------------------- .. If for some reason you cannot declare skip-conditions you can also imperatively produce a skip-outcome from within test or setup code. 
Example:: 何らã‹ã®ç†ç”±ã§ã‚¹ã‚­ãƒƒãƒ—æ¡ä»¶ã‚’宣言ã§ããªã„å ´åˆã‚‚ã€ãƒ†ã‚¹ãƒˆé–¢æ•°ã¾ãŸã¯ setup コード内ã‹ã‚‰ã‚¹ã‚­ãƒƒãƒ—ã™ã‚‹ã‚ˆã†ã«å‘½ä»¤çš„ã«è¨˜è¿°ã§ãã¾ã™ã€‚サンプルを紹介ã—ã¾ã™:: def test_function(): if not valid_config(): pytest.skip("unsupported configuration") pytest-2.5.1/README.rst0000664000175000017500000000337012254002202014116 0ustar hpkhpk00000000000000 Documentation: http://pytest.org/latest/ Changelog: http://pytest.org/latest/changelog.html Issues: https://bitbucket.org/hpk42/pytest/issues?status=open The ``py.test`` testing tool makes it easy to write small tests, yet scales to support complex functional testing. It provides - `auto-discovery `_ of test modules and functions, - detailed info on failing `assert statements `_ (no need to remember ``self.assert*`` names) - `modular fixtures `_ for managing small or parametrized long-lived test resources. - multi-paradigm support: you can use ``py.test`` to run test suites based on `unittest `_ (or trial), `nose `_ - single-source compatibility to Python2.4 all the way up to Python3.3, PyPy-1.9 and Jython-2.5.1. - many `external plugins `_. .. image:: https://secure.travis-ci.org/hpk42/pytest.png :target: http://travis-ci.org/hpk42/pytest A simple example for a test:: # content of test_module.py def test_function(): i = 4 assert i == 3 which can be run with ``py.test test_module.py``. See `getting-started `_ for more examples. For much more info, including PDF docs, see http://pytest.org and report bugs at: http://bitbucket.org/hpk42/pytest/issues/ and checkout repos at: http://github.com/hpk42/pytest/ (mirror) http://bitbucket.org/hpk42/pytest/ Copyright Holger Krekel and others, 2004-2013 Licensed under the MIT license. pytest-2.5.1/pytest.py0000664000175000017500000000076212254002202014333 0ustar hpkhpk00000000000000# PYTHON_ARGCOMPLETE_OK """ pytest: unit and functional testing with Python. 
""" __all__ = ['main'] if __name__ == '__main__': # if run as a script or by 'python -m pytest' # we trigger the below "else" condition by the following import import pytest raise SystemExit(pytest.main()) # else we are imported from _pytest.config import main, UsageError, _preloadplugins, cmdline from _pytest import __version__ _preloadplugins() # to populate pytest.* namespace so help(pytest) works pytest-2.5.1/testing/0000775000175000017500000000000012254002202014101 5ustar hpkhpk00000000000000pytest-2.5.1/testing/test_tmpdir.py0000664000175000017500000000664512254002202017024 0ustar hpkhpk00000000000000import py, pytest from _pytest.tmpdir import tmpdir, TempdirHandler def test_funcarg(testdir): testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.addcall(id='a') metafunc.addcall(id='b') def test_func(tmpdir): pass """) reprec = testdir.inline_run() calls = reprec.getcalls("pytest_runtest_setup") item = calls[0].item # pytest_unconfigure has deleted the TempdirHandler already config = item.config config._tmpdirhandler = TempdirHandler(config) item._initrequest() p = tmpdir(item._request) assert p.check() bn = p.basename.strip("0123456789") assert bn.endswith("test_func_a_") item.name = "qwe/\\abc" p = tmpdir(item._request) assert p.check() bn = p.basename.strip("0123456789") assert bn == "qwe__abc" def test_ensuretemp(recwarn): #py.test.deprecated_call(py.test.ensuretemp, 'hello') d1 = py.test.ensuretemp('hello') d2 = py.test.ensuretemp('hello') assert d1 == d2 assert d1.check(dir=1) class TestTempdirHandler: def test_mktemp(self, testdir): config = testdir.parseconfig() config.option.basetemp = testdir.mkdir("hello") t = TempdirHandler(config) tmp = t.mktemp("world") assert tmp.relto(t.getbasetemp()) == "world0" tmp = t.mktemp("this") assert tmp.relto(t.getbasetemp()).startswith("this") tmp2 = t.mktemp("this") assert tmp2.relto(t.getbasetemp()).startswith("this") assert tmp2 != tmp class TestConfigTmpdir: def 
test_getbasetemp_custom_removes_old(self, testdir): p = testdir.tmpdir.join("xyz") config = testdir.parseconfigure("--basetemp=xyz") b = config._tmpdirhandler.getbasetemp() assert b == p h = b.ensure("hello") config._tmpdirhandler.getbasetemp() assert h.check() config = testdir.parseconfigure("--basetemp=xyz") b2 = config._tmpdirhandler.getbasetemp() assert b2.check() assert not h.check() def test_basetemp(testdir): mytemp = testdir.tmpdir.mkdir("mytemp") p = testdir.makepyfile(""" import pytest def test_1(): pytest.ensuretemp("hello") """) result = testdir.runpytest(p, '--basetemp=%s' % mytemp) assert result.ret == 0 assert mytemp.join('hello').check() @pytest.mark.skipif("not hasattr(py.path.local, 'mksymlinkto')") def test_tmpdir_always_is_realpath(testdir): # the reason why tmpdir should be a realpath is that # when you cd to it and do "os.getcwd()" you will anyway # get the realpath. Using the symlinked path can thus # easily result in path-inequality # XXX if that proves to be a problem, consider using # os.environ["PWD"] realtemp = testdir.tmpdir.mkdir("myrealtemp") linktemp = testdir.tmpdir.join("symlinktemp") linktemp.mksymlinkto(realtemp) p = testdir.makepyfile(""" def test_1(tmpdir): import os assert os.path.realpath(str(tmpdir)) == str(tmpdir) """) result = testdir.runpytest("-s", p, '--basetemp=%s/bt' % linktemp) assert not result.ret def test_tmpdir_too_long_on_parametrization(testdir): testdir.makepyfile(""" import pytest @pytest.mark.parametrize("arg", ["1"*1000]) def test_some(arg, tmpdir): tmpdir.ensure("hello") """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) pytest-2.5.1/testing/test_nose.py0000664000175000017500000002103312254002202016455 0ustar hpkhpk00000000000000import py, pytest def setup_module(mod): mod.nose = py.test.importorskip("nose") def test_nose_setup(testdir): p = testdir.makepyfile(""" l = [] from nose.tools import with_setup @with_setup(lambda: l.append(1), lambda: l.append(2)) def test_hello(): assert l == [1] 
def test_world(): assert l == [1,2] test_hello.setup = lambda: l.append(1) test_hello.teardown = lambda: l.append(2) """) result = testdir.runpytest(p, '-p', 'nose') result.stdout.fnmatch_lines([ "*2 passed*" ]) def test_setup_func_with_setup_decorator(): from _pytest.nose import call_optional l = [] class A: @pytest.fixture(autouse=True) def f(self): l.append(1) call_optional(A(), "f") assert not l def test_setup_func_not_callable(): from _pytest.nose import call_optional class A: f = 1 call_optional(A(), "f") def test_nose_setup_func(testdir): p = testdir.makepyfile(""" from nose.tools import with_setup l = [] def my_setup(): a = 1 l.append(a) def my_teardown(): b = 2 l.append(b) @with_setup(my_setup, my_teardown) def test_hello(): print (l) assert l == [1] def test_world(): print (l) assert l == [1,2] """) result = testdir.runpytest(p, '-p', 'nose') result.stdout.fnmatch_lines([ "*2 passed*" ]) def test_nose_setup_func_failure(testdir): p = testdir.makepyfile(""" from nose.tools import with_setup l = [] my_setup = lambda x: 1 my_teardown = lambda x: 2 @with_setup(my_setup, my_teardown) def test_hello(): print (l) assert l == [1] def test_world(): print (l) assert l == [1,2] """) result = testdir.runpytest(p, '-p', 'nose') result.stdout.fnmatch_lines([ "*TypeError: ()*" ]) def test_nose_setup_func_failure_2(testdir): testdir.makepyfile(""" l = [] my_setup = 1 my_teardown = 2 def test_hello(): assert l == [] test_hello.setup = my_setup test_hello.teardown = my_teardown """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_nose_setup_partial(testdir): py.test.importorskip("functools") p = testdir.makepyfile(""" from functools import partial l = [] def my_setup(x): a = x l.append(a) def my_teardown(x): b = x l.append(b) my_setup_partial = partial(my_setup, 1) my_teardown_partial = partial(my_teardown, 2) def test_hello(): print (l) assert l == [1] def test_world(): print (l) assert l == [1,2] test_hello.setup = my_setup_partial 
test_hello.teardown = my_teardown_partial """) result = testdir.runpytest(p, '-p', 'nose') result.stdout.fnmatch_lines([ "*2 passed*" ]) def test_nose_test_generator_fixtures(testdir): p = testdir.makepyfile(""" # taken from nose-0.11.1 unit_tests/test_generator_fixtures.py from nose.tools import eq_ called = [] def outer_setup(): called.append('outer_setup') def outer_teardown(): called.append('outer_teardown') def inner_setup(): called.append('inner_setup') def inner_teardown(): called.append('inner_teardown') def test_gen(): called[:] = [] for i in range(0, 5): yield check, i def check(i): expect = ['outer_setup'] for x in range(0, i): expect.append('inner_setup') expect.append('inner_teardown') expect.append('inner_setup') eq_(called, expect) test_gen.setup = outer_setup test_gen.teardown = outer_teardown check.setup = inner_setup check.teardown = inner_teardown class TestClass(object): def setup(self): print ("setup called in %s" % self) self.called = ['setup'] def teardown(self): print ("teardown called in %s" % self) eq_(self.called, ['setup']) self.called.append('teardown') def test(self): print ("test called in %s" % self) for i in range(0, 5): yield self.check, i def check(self, i): print ("check called in %s" % self) expect = ['setup'] #for x in range(0, i): # expect.append('setup') # expect.append('teardown') #expect.append('setup') eq_(self.called, expect) """) result = testdir.runpytest(p, '-p', 'nose') result.stdout.fnmatch_lines([ "*10 passed*" ]) def test_module_level_setup(testdir): testdir.makepyfile(""" from nose.tools import with_setup items = {} def setup(): items[1]=1 def teardown(): del items[1] def setup2(): items[2] = 2 def teardown2(): del items[2] def test_setup_module_setup(): assert items[1] == 1 @with_setup(setup2, teardown2) def test_local_setup(): assert items[2] == 2 assert 1 not in items """) result = testdir.runpytest('-p', 'nose') result.stdout.fnmatch_lines([ "*2 passed*", ]) def test_nose_style_setup_teardown(testdir): 
testdir.makepyfile(""" l = [] def setup_module(): l.append(1) def teardown_module(): del l[0] def test_hello(): assert l == [1] def test_world(): assert l == [1] """) result = testdir.runpytest('-p', 'nose') result.stdout.fnmatch_lines([ "*2 passed*", ]) def test_nose_setup_ordering(testdir): testdir.makepyfile(""" def setup_module(mod): mod.visited = True class TestClass: def setup(self): assert visited def test_first(self): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*1 passed*", ]) def test_apiwrapper_problem_issue260(testdir): # this would end up trying a call a optional teardown on the class # for plain unittests we dont want nose behaviour testdir.makepyfile(""" import unittest class TestCase(unittest.TestCase): def setup(self): #should not be called in unittest testcases assert 0, 'setup' def teardown(self): #should not be called in unittest testcases assert 0, 'teardown' def setUp(self): print('setup') def tearDown(self): print('teardown') def test_fun(self): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines("*1 passed*") @pytest.mark.skipif("sys.version_info < (2,6)") def test_setup_teardown_linking_issue265(testdir): # we accidentally didnt integrate nose setupstate with normal setupstate # this test ensures that won't happen again testdir.makepyfile(''' import pytest class TestGeneric(object): def test_nothing(self): """Tests the API of the implementation (for generic and specialized).""" @pytest.mark.skipif("True", reason= "Skip tests to check if teardown is skipped as well.") class TestSkipTeardown(TestGeneric): def setup(self): """Sets up my specialized implementation for $COOL_PLATFORM.""" raise Exception("should not call setup for skipped tests") def teardown(self): """Undoes the setup.""" raise Exception("should not call teardown for skipped tests") ''') reprec = testdir.inline_run() reprec.assertoutcome(passed=1, skipped=1) def test_SkipTest_during_collection(testdir): testdir.makepyfile(""" import nose 
raise nose.SkipTest("during collection") def test_failing(): assert False """) reprec = testdir.inline_run() reprec.assertoutcome(skipped=1) pytest-2.5.1/testing/test_unittest.py0000664000175000017500000005114512254002202017377 0ustar hpkhpk00000000000000import pytest def test_simple_unittest(testdir): testpath = testdir.makepyfile(""" import unittest pytest_plugins = "pytest_unittest" class MyTestCase(unittest.TestCase): def testpassing(self): self.assertEquals('foo', 'foo') def test_failing(self): self.assertEquals('foo', 'bar') """) reprec = testdir.inline_run(testpath) assert reprec.matchreport("testpassing").passed assert reprec.matchreport("test_failing").failed def test_runTest_method(testdir): testdir.makepyfile(""" import unittest pytest_plugins = "pytest_unittest" class MyTestCaseWithRunTest(unittest.TestCase): def runTest(self): self.assertEquals('foo', 'foo') class MyTestCaseWithoutRunTest(unittest.TestCase): def runTest(self): self.assertEquals('foo', 'foo') def test_something(self): pass """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines(""" *MyTestCaseWithRunTest.runTest* *MyTestCaseWithoutRunTest.test_something* *2 passed* """) def test_isclasscheck_issue53(testdir): testpath = testdir.makepyfile(""" import unittest class _E(object): def __getattr__(self, tag): pass E = _E() """) result = testdir.runpytest(testpath) assert result.ret == 0 def test_setup(testdir): testpath = testdir.makepyfile(""" import unittest class MyTestCase(unittest.TestCase): def setUp(self): self.foo = 1 def setup_method(self, method): self.foo2 = 1 def test_both(self): self.assertEquals(1, self.foo) assert self.foo2 == 1 def teardown_method(self, method): assert 0, "42" """) reprec = testdir.inline_run("-s", testpath) assert reprec.matchreport("test_both", when="call").passed rep = reprec.matchreport("test_both", when="teardown") assert rep.failed and '42' in str(rep.longrepr) def test_setUpModule(testdir): testpath = testdir.makepyfile(""" l = [] def 
setUpModule(): l.append(1) def tearDownModule(): del l[0] def test_hello(): assert l == [1] def test_world(): assert l == [1] """) result = testdir.runpytest(testpath) result.stdout.fnmatch_lines([ "*2 passed*", ]) def test_setUpModule_failing_no_teardown(testdir): testpath = testdir.makepyfile(""" l = [] def setUpModule(): 0/0 def tearDownModule(): l.append(1) def test_hello(): pass """) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=0, failed=1) call = reprec.getcalls("pytest_runtest_setup")[0] assert not call.item.module.l def test_new_instances(testdir): testpath = testdir.makepyfile(""" import unittest class MyTestCase(unittest.TestCase): def test_func1(self): self.x = 2 def test_func2(self): assert not hasattr(self, 'x') """) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=2) def test_teardown(testdir): testpath = testdir.makepyfile(""" import unittest class MyTestCase(unittest.TestCase): l = [] def test_one(self): pass def tearDown(self): self.l.append(None) class Second(unittest.TestCase): def test_check(self): self.assertEquals(MyTestCase.l, [None]) """) reprec = testdir.inline_run(testpath) passed, skipped, failed = reprec.countoutcomes() assert failed == 0, failed assert passed == 2 assert passed + skipped + failed == 2 @pytest.mark.skipif("sys.version_info < (2,7)") def test_unittest_skip_issue148(testdir): testpath = testdir.makepyfile(""" import unittest @unittest.skip("hello") class MyTestCase(unittest.TestCase): @classmethod def setUpClass(self): xxx def test_one(self): pass @classmethod def tearDownClass(self): xxx """) reprec = testdir.inline_run(testpath) reprec.assertoutcome(skipped=1) def test_method_and_teardown_failing_reporting(testdir): testdir.makepyfile(""" import unittest, pytest class TC(unittest.TestCase): def tearDown(self): assert 0, "down1" def test_method(self): assert False, "down2" """) result = testdir.runpytest("-s") assert result.ret == 1 result.stdout.fnmatch_lines([ "*tearDown*", "*assert 
0*", "*test_method*", "*assert False*", "*1 failed*1 error*", ]) def test_setup_failure_is_shown(testdir): testdir.makepyfile(""" import unittest import pytest class TC(unittest.TestCase): def setUp(self): assert 0, "down1" def test_method(self): print ("never42") xyz """) result = testdir.runpytest("-s") assert result.ret == 1 result.stdout.fnmatch_lines([ "*setUp*", "*assert 0*down1*", "*1 failed*", ]) assert 'never42' not in result.stdout.str() def test_setup_setUpClass(testdir): testpath = testdir.makepyfile(""" import unittest import pytest class MyTestCase(unittest.TestCase): x = 0 @classmethod def setUpClass(cls): cls.x += 1 def test_func1(self): assert self.x == 1 def test_func2(self): assert self.x == 1 @classmethod def tearDownClass(cls): cls.x -= 1 def test_teareddown(): assert MyTestCase.x == 0 """) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=3) def test_setup_class(testdir): testpath = testdir.makepyfile(""" import unittest import pytest class MyTestCase(unittest.TestCase): x = 0 def setup_class(cls): cls.x += 1 def test_func1(self): assert self.x == 1 def test_func2(self): assert self.x == 1 def teardown_class(cls): cls.x -= 1 def test_teareddown(): assert MyTestCase.x == 0 """) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=3) @pytest.mark.parametrize("type", ['Error', 'Failure']) def test_testcase_adderrorandfailure_defers(testdir, type): testdir.makepyfile(""" from unittest import TestCase import pytest class MyTestCase(TestCase): def run(self, result): excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0) try: result.add%s(self, excinfo._excinfo) except KeyboardInterrupt: raise except: pytest.fail("add%s should not raise") def test_hello(self): pass """ % (type, type)) result = testdir.runpytest() assert 'should not raise' not in result.stdout.str() @pytest.mark.parametrize("type", ['Error', 'Failure']) def test_testcase_custom_exception_info(testdir, type): testdir.makepyfile(""" from unittest import 
TestCase import py, pytest class MyTestCase(TestCase): def run(self, result): excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0) # we fake an incompatible exception info from _pytest.monkeypatch import monkeypatch mp = monkeypatch() def t(*args): mp.undo() raise TypeError() mp.setattr(py.code, 'ExceptionInfo', t) try: excinfo = excinfo._excinfo result.add%(type)s(self, excinfo) finally: mp.undo() def test_hello(self): pass """ % locals()) result = testdir.runpytest() result.stdout.fnmatch_lines([ "NOTE: Incompatible Exception Representation*", "*ZeroDivisionError*", "*1 failed*", ]) def test_testcase_totally_incompatible_exception_info(testdir): item, = testdir.getitems(""" from unittest import TestCase class MyTestCase(TestCase): def test_hello(self): pass """) item.addError(None, 42) excinfo = item._excinfo.pop(0) assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr()) def test_module_level_pytestmark(testdir): testpath = testdir.makepyfile(""" import unittest import pytest pytestmark = pytest.mark.xfail class MyTestCase(unittest.TestCase): def test_func1(self): assert 0 """) reprec = testdir.inline_run(testpath, "-s") reprec.assertoutcome(skipped=1) def test_trial_testcase_skip_property(testdir): pytest.importorskip('twisted.trial.unittest') testpath = testdir.makepyfile(""" from twisted.trial import unittest class MyTestCase(unittest.TestCase): skip = 'dont run' def test_func(self): pass """) reprec = testdir.inline_run(testpath, "-s") reprec.assertoutcome(skipped=1) def test_trial_testfunction_skip_property(testdir): pytest.importorskip('twisted.trial.unittest') testpath = testdir.makepyfile(""" from twisted.trial import unittest class MyTestCase(unittest.TestCase): def test_func(self): pass test_func.skip = 'dont run' """) reprec = testdir.inline_run(testpath, "-s") reprec.assertoutcome(skipped=1) def test_trial_testcase_todo_property(testdir): pytest.importorskip('twisted.trial.unittest') testpath = testdir.makepyfile(""" from twisted.trial 
import unittest class MyTestCase(unittest.TestCase): todo = 'dont run' def test_func(self): assert 0 """) reprec = testdir.inline_run(testpath, "-s") reprec.assertoutcome(skipped=1) def test_trial_testfunction_todo_property(testdir): pytest.importorskip('twisted.trial.unittest') testpath = testdir.makepyfile(""" from twisted.trial import unittest class MyTestCase(unittest.TestCase): def test_func(self): assert 0 test_func.todo = 'dont run' """) reprec = testdir.inline_run(testpath, "-s") reprec.assertoutcome(skipped=1) class TestTrialUnittest: def setup_class(cls): cls.ut = pytest.importorskip("twisted.trial.unittest") def test_trial_testcase_runtest_not_collected(self, testdir): testdir.makepyfile(""" from twisted.trial.unittest import TestCase class TC(TestCase): def test_hello(self): pass """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) testdir.makepyfile(""" from twisted.trial.unittest import TestCase class TC(TestCase): def runTest(self): pass """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_trial_exceptions_with_skips(self, testdir): testdir.makepyfile(""" from twisted.trial import unittest import pytest class TC(unittest.TestCase): def test_hello(self): pytest.skip("skip_in_method") @pytest.mark.skipif("sys.version_info != 1") def test_hello2(self): pass @pytest.mark.xfail(reason="iwanto") def test_hello3(self): assert 0 def test_hello4(self): pytest.xfail("i2wanto") def test_trial_skip(self): pass test_trial_skip.skip = "trialselfskip" def test_trial_todo(self): assert 0 test_trial_todo.todo = "mytodo" def test_trial_todo_success(self): pass test_trial_todo_success.todo = "mytodo" class TC2(unittest.TestCase): def setup_class(cls): pytest.skip("skip_in_setup_class") def test_method(self): pass """) result = testdir.runpytest("-rxs") assert result.ret == 0 result.stdout.fnmatch_lines_random([ "*XFAIL*test_trial_todo*", "*trialselfskip*", "*skip_in_setup_class*", "*iwanto*", "*i2wanto*", "*sys.version_info*", 
"*skip_in_method*", "*4 skipped*3 xfail*1 xpass*", ]) def test_trial_error(self, testdir): testdir.makepyfile(""" from twisted.trial.unittest import TestCase from twisted.internet.defer import Deferred from twisted.internet import reactor class TC(TestCase): def test_one(self): crash def test_two(self): def f(_): crash d = Deferred() d.addCallback(f) reactor.callLater(0.3, d.callback, None) return d def test_three(self): def f(): pass # will never get called reactor.callLater(0.3, f) # will crash at teardown def test_four(self): def f(_): reactor.callLater(0.3, f) crash d = Deferred() d.addCallback(f) reactor.callLater(0.3, d.callback, None) return d # will crash both at test time and at teardown """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*ERRORS*", "*DelayedCalls*", "*test_four*", "*NameError*crash*", "*test_one*", "*NameError*crash*", "*test_three*", "*DelayedCalls*", "*test_two*", "*crash*", ]) def test_trial_pdb(self, testdir): p = testdir.makepyfile(""" from twisted.trial import unittest import pytest class TC(unittest.TestCase): def test_hello(self): assert 0, "hellopdb" """) child = testdir.spawn_pytest(p) child.expect("hellopdb") child.sendeof() def test_djangolike_testcase(testdir): # contributed from Morten Breekevold testdir.makepyfile(""" from unittest import TestCase, main class DjangoLikeTestCase(TestCase): def setUp(self): print ("setUp()") def test_presetup_has_been_run(self): print ("test_thing()") self.assertTrue(hasattr(self, 'was_presetup')) def tearDown(self): print ("tearDown()") def __call__(self, result=None): try: self._pre_setup() except (KeyboardInterrupt, SystemExit): raise except Exception: import sys result.addError(self, sys.exc_info()) return super(DjangoLikeTestCase, self).__call__(result) try: self._post_teardown() except (KeyboardInterrupt, SystemExit): raise except Exception: import sys result.addError(self, sys.exc_info()) return def _pre_setup(self): print ("_pre_setup()") self.was_presetup = True def 
_post_teardown(self): print ("_post_teardown()") """) result = testdir.runpytest("-s") assert result.ret == 0 result.stdout.fnmatch_lines([ "*_pre_setup()*", "*setUp()*", "*test_thing()*", "*tearDown()*", "*_post_teardown()*", ]) def test_unittest_not_shown_in_traceback(testdir): testdir.makepyfile(""" import unittest class t(unittest.TestCase): def test_hello(self): x = 3 self.assertEquals(x, 4) """) res = testdir.runpytest() assert "failUnlessEqual" not in res.stdout.str() def test_unorderable_types(testdir): testdir.makepyfile(""" import unittest class TestJoinEmpty(unittest.TestCase): pass def make_test(): class Test(unittest.TestCase): pass Test.__name__ = "TestFoo" return Test TestFoo = make_test() """) result = testdir.runpytest() assert "TypeError" not in result.stdout.str() assert result.ret == 0 def test_unittest_typerror_traceback(testdir): testdir.makepyfile(""" import unittest class TestJoinEmpty(unittest.TestCase): def test_hello(self, arg1): pass """) result = testdir.runpytest() assert "TypeError" in result.stdout.str() assert result.ret == 1 @pytest.mark.skipif("sys.version_info < (2,7)") def test_unittest_unexpected_failure(testdir): testdir.makepyfile(""" import unittest class MyTestCase(unittest.TestCase): @unittest.expectedFailure def test_func1(self): assert 0 @unittest.expectedFailure def test_func2(self): assert 1 """) result = testdir.runpytest("-rxX") result.stdout.fnmatch_lines([ "*XFAIL*MyTestCase*test_func1*", "*XPASS*MyTestCase*test_func2*", "*1 xfailed*1 xpass*", ]) def test_unittest_setup_interaction(testdir): testdir.makepyfile(""" import unittest import pytest class MyTestCase(unittest.TestCase): @pytest.fixture(scope="class", autouse=True) def perclass(self, request): request.cls.hello = "world" @pytest.fixture(scope="function", autouse=True) def perfunction(self, request): request.instance.funcname = request.function.__name__ def test_method1(self): assert self.funcname == "test_method1" assert self.hello == "world" def 
test_method2(self): assert self.funcname == "test_method2" def test_classattr(self): assert self.__class__.hello == "world" """) result = testdir.runpytest() result.stdout.fnmatch_lines("*3 passed*") def test_non_unittest_no_setupclass_support(testdir): testpath = testdir.makepyfile(""" class TestFoo: x = 0 @classmethod def setUpClass(cls): cls.x = 1 def test_method1(self): assert self.x == 0 @classmethod def tearDownClass(cls): cls.x = 1 def test_not_teareddown(): assert TestFoo.x == 0 """) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=2) def test_no_teardown_if_setupclass_failed(testdir): testpath = testdir.makepyfile(""" import unittest class MyTestCase(unittest.TestCase): x = 0 @classmethod def setUpClass(cls): cls.x = 1 assert False def test_func1(self): cls.x = 10 @classmethod def tearDownClass(cls): cls.x = 100 def test_notTornDown(): assert MyTestCase.x == 1 """) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=1, failed=1) def test_issue333_result_clearing(testdir): testdir.makeconftest(""" def pytest_runtest_call(__multicall__, item): __multicall__.execute() assert 0 """) testdir.makepyfile(""" import unittest class TestIt(unittest.TestCase): def test_func(self): 0/0 """) reprec = testdir.inline_run() reprec.assertoutcome(failed=1) pytest-2.5.1/testing/test_pytester.py0000664000175000017500000000741512254002202017400 0ustar hpkhpk00000000000000import py import pytest import os from _pytest.pytester import HookRecorder from _pytest.core import PluginManager def test_reportrecorder(testdir): item = testdir.getitem("def test_func(): pass") recorder = testdir.getreportrecorder(item.config) assert not recorder.getfailures() pytest.xfail("internal reportrecorder tests need refactoring") class rep: excinfo = None passed = False failed = True skipped = False when = "call" recorder.hook.pytest_runtest_logreport(report=rep) failures = recorder.getfailures() assert failures == [rep] failures = recorder.getfailures() assert 
failures == [rep] class rep: excinfo = None passed = False failed = False skipped = True when = "call" rep.passed = False rep.skipped = True recorder.hook.pytest_runtest_logreport(report=rep) modcol = testdir.getmodulecol("") rep = modcol.config.hook.pytest_make_collect_report(collector=modcol) rep.passed = False rep.failed = True rep.skipped = False recorder.hook.pytest_collectreport(report=rep) passed, skipped, failed = recorder.listoutcomes() assert not passed and skipped and failed numpassed, numskipped, numfailed = recorder.countoutcomes() assert numpassed == 0 assert numskipped == 1 assert numfailed == 1 assert len(recorder.getfailedcollections()) == 1 recorder.unregister() recorder.clear() recorder.hook.pytest_runtest_logreport(report=rep) pytest.raises(ValueError, "recorder.getfailures()") def test_parseconfig(testdir): config1 = testdir.parseconfig() config2 = testdir.parseconfig() assert config2 != config1 assert config1 != py.test.config def test_testdir_runs_with_plugin(testdir): testdir.makepyfile(""" pytest_plugins = "pytest_pytester" def test_hello(testdir): assert 1 """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*1 passed*" ]) def test_hookrecorder_basic(): rec = HookRecorder(PluginManager()) class ApiClass: def pytest_xyz(self, arg): "x" rec.start_recording(ApiClass) rec.hook.pytest_xyz(arg=123) call = rec.popcall("pytest_xyz") assert call.arg == 123 assert call._name == "pytest_xyz" pytest.raises(pytest.fail.Exception, "rec.popcall('abc')") def test_hookrecorder_basic_no_args_hook(): rec = HookRecorder(PluginManager()) apimod = type(os)('api') def pytest_xyz(): "x" apimod.pytest_xyz = pytest_xyz rec.start_recording(apimod) rec.hook.pytest_xyz() call = rec.popcall("pytest_xyz") assert call._name == "pytest_xyz" def test_functional(testdir, linecomp): reprec = testdir.inline_runsource(""" import pytest from _pytest.core import HookRelay, PluginManager pytest_plugins="pytester" def test_func(_pytest): class ApiClass: def 
pytest_xyz(self, arg): "x" hook = HookRelay([ApiClass], PluginManager()) rec = _pytest.gethookrecorder(hook) class Plugin: def pytest_xyz(self, arg): return arg + 1 rec._pluginmanager.register(Plugin()) res = rec.hook.pytest_xyz(arg=41) assert res == [42] """) reprec.assertoutcome(passed=1) def test_makepyfile_unicode(testdir): global unichr try: unichr(65) except NameError: unichr = chr testdir.makepyfile(unichr(0xfffd)) def test_inprocess_plugins(testdir): class Plugin(object): configured = False def pytest_configure(self, config): self.configured = True plugin = Plugin() testdir.inprocess_run([], [plugin]) assert plugin.configured pytest-2.5.1/testing/test_pdb.py0000664000175000017500000001672212254002202016267 0ustar hpkhpk00000000000000 import py import sys from test_doctest import xfail_if_pdbpp_installed class TestPDB: def pytest_funcarg__pdblist(self, request): monkeypatch = request.getfuncargvalue("monkeypatch") pdblist = [] def mypdb(*args): pdblist.append(args) plugin = request.config.pluginmanager.getplugin('pdb') monkeypatch.setattr(plugin, 'post_mortem', mypdb) return pdblist def test_pdb_on_fail(self, testdir, pdblist): rep = testdir.inline_runsource1('--pdb', """ def test_func(): assert 0 """) assert rep.failed assert len(pdblist) == 1 tb = py.code.Traceback(pdblist[0][0]) assert tb[-1].name == "test_func" def test_pdb_on_xfail(self, testdir, pdblist): rep = testdir.inline_runsource1('--pdb', """ import pytest @pytest.mark.xfail def test_func(): assert 0 """) assert "xfail" in rep.keywords assert not pdblist def test_pdb_on_skip(self, testdir, pdblist): rep = testdir.inline_runsource1('--pdb', """ import pytest def test_func(): pytest.skip("hello") """) assert rep.skipped assert len(pdblist) == 0 def test_pdb_on_BdbQuit(self, testdir, pdblist): rep = testdir.inline_runsource1('--pdb', """ import bdb def test_func(): raise bdb.BdbQuit """) assert rep.failed assert len(pdblist) == 0 def test_pdb_interaction(self, testdir): p1 = testdir.makepyfile(""" 
def test_1(): i = 0 assert i == 1 """) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect(".*def test_1") child.expect(".*i = 0") child.expect("(Pdb)") child.sendeof() rest = child.read().decode("utf8") assert "1 failed" in rest assert "def test_1" not in rest if child.isalive(): child.wait() def test_pdb_interaction_exception(self, testdir): p1 = testdir.makepyfile(""" import pytest def globalfunc(): pass def test_1(): pytest.raises(ValueError, globalfunc) """) child = testdir.spawn_pytest("--pdb %s" % p1) child.expect(".*def test_1") child.expect(".*pytest.raises.*globalfunc") child.expect("(Pdb)") child.sendline("globalfunc") child.expect(".*function") child.sendeof() child.expect("1 failed") if child.isalive(): child.wait() def test_pdb_interaction_on_collection_issue181(self, testdir): p1 = testdir.makepyfile(""" import pytest xxx """) child = testdir.spawn_pytest("--pdb %s" % p1) #child.expect(".*import pytest.*") child.expect("(Pdb)") child.sendeof() child.expect("1 error") if child.isalive(): child.wait() def test_pdb_interaction_on_internal_error(self, testdir): testdir.makeconftest(""" def pytest_runtest_protocol(): 0/0 """) p1 = testdir.makepyfile("def test_func(): pass") child = testdir.spawn_pytest("--pdb %s" % p1) #child.expect(".*import pytest.*") child.expect("(Pdb)") child.sendeof() if child.isalive(): child.wait() def test_pdb_interaction_capturing_simple(self, testdir): p1 = testdir.makepyfile(""" import pytest def test_1(): i = 0 print ("hello17") pytest.set_trace() x = 3 """) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.expect("x = 3") child.expect("(Pdb)") child.sendeof() rest = child.read().decode("utf-8") assert "1 failed" in rest assert "def test_1" in rest assert "hello17" in rest # out is captured if child.isalive(): child.wait() def test_pdb_set_trace_interception(self, testdir): p1 = testdir.makepyfile(""" import pdb def test_1(): pdb.set_trace() """) child = testdir.spawn_pytest(str(p1)) 
child.expect("test_1") child.expect("(Pdb)") child.sendeof() rest = child.read().decode("utf8") assert "1 failed" in rest assert "reading from stdin while output" not in rest if child.isalive(): child.wait() def test_pdb_and_capsys(self, testdir): p1 = testdir.makepyfile(""" import pytest def test_1(capsys): print ("hello1") pytest.set_trace() """) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.send("capsys.readouterr()\n") child.expect("hello1") child.sendeof() child.read() if child.isalive(): child.wait() @xfail_if_pdbpp_installed def test_pdb_interaction_doctest(self, testdir): p1 = testdir.makepyfile(""" import pytest def function_1(): ''' >>> i = 0 >>> assert i == 1 ''' """) child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1) child.expect("(Pdb)") child.sendline('i') child.expect("0") child.expect("(Pdb)") child.sendeof() rest = child.read().decode("utf8") assert "1 failed" in rest if child.isalive(): child.wait() def test_pdb_interaction_capturing_twice(self, testdir): p1 = testdir.makepyfile(""" import pytest def test_1(): i = 0 print ("hello17") pytest.set_trace() x = 3 print ("hello18") pytest.set_trace() x = 4 """) child = testdir.spawn_pytest(str(p1)) child.expect("test_1") child.expect("x = 3") child.expect("(Pdb)") child.sendline('c') child.expect("x = 4") child.sendeof() rest = child.read().decode("utf8") assert "1 failed" in rest assert "def test_1" in rest assert "hello17" in rest # out is captured assert "hello18" in rest # out is captured if child.isalive(): child.wait() def test_pdb_used_outside_test(self, testdir): p1 = testdir.makepyfile(""" import pytest pytest.set_trace() x = 5 """) child = testdir.spawn("%s %s" %(sys.executable, p1)) child.expect("x = 5") child.sendeof() child.wait() def test_pdb_used_in_generate_tests(self, testdir): p1 = testdir.makepyfile(""" import pytest def pytest_generate_tests(metafunc): pytest.set_trace() x = 5 def test_foo(a): pass """) child = testdir.spawn_pytest(str(p1)) 
child.expect("x = 5") child.sendeof() child.wait() def test_pdb_collection_failure_is_shown(self, testdir): p1 = testdir.makepyfile("""xxx """) result = testdir.runpytest("--pdb", p1) result.stdout.fnmatch_lines([ "*NameError*xxx*", "*1 error*", ]) pytest-2.5.1/testing/test_config.py0000664000175000017500000003014612254002202016763 0ustar hpkhpk00000000000000import py, pytest from _pytest.config import getcfg class TestParseIni: def test_getcfg_and_config(self, testdir, tmpdir): sub = tmpdir.mkdir("sub") sub.chdir() tmpdir.join("setup.cfg").write(py.code.Source(""" [pytest] name = value """)) cfg = getcfg([sub], ["setup.cfg"]) assert cfg['name'] == "value" config = testdir.parseconfigure(sub) assert config.inicfg['name'] == 'value' def test_getcfg_empty_path(self, tmpdir): getcfg([''], ['setup.cfg']) #happens on py.test "" def test_append_parse_args(self, testdir, tmpdir): tmpdir.join("setup.cfg").write(py.code.Source(""" [pytest] addopts = --verbose """)) config = testdir.parseconfig(tmpdir) assert config.option.verbose #config = testdir.Config() #args = [tmpdir,] #config._preparse(args, addopts=False) #assert len(args) == 1 def test_tox_ini_wrong_version(self, testdir): testdir.makefile('.ini', tox=""" [pytest] minversion=9.0 """) result = testdir.runpytest() assert result.ret != 0 result.stderr.fnmatch_lines([ "*tox.ini:2*requires*9.0*actual*" ]) @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split()) def test_ini_names(self, testdir, name): testdir.tmpdir.join(name).write(py.std.textwrap.dedent(""" [pytest] minversion = 1.0 """)) config = testdir.parseconfig() assert config.getini("minversion") == "1.0" def test_toxini_before_lower_pytestini(self, testdir): sub = testdir.tmpdir.mkdir("sub") sub.join("tox.ini").write(py.std.textwrap.dedent(""" [pytest] minversion = 2.0 """)) testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent(""" [pytest] minversion = 1.5 """)) config = testdir.parseconfigure(sub) assert config.getini("minversion") == 
"2.0" @pytest.mark.xfail(reason="probably not needed") def test_confcutdir(self, testdir): sub = testdir.mkdir("sub") sub.chdir() testdir.makeini(""" [pytest] addopts = --qwe """) result = testdir.runpytest("--confcutdir=.") assert result.ret == 0 class TestConfigCmdlineParsing: def test_parsing_again_fails(self, testdir): config = testdir.parseconfig() pytest.raises(AssertionError, lambda: config.parse([])) class TestConfigAPI: def test_config_trace(self, testdir): config = testdir.parseconfig() l = [] config.trace.root.setwriter(l.append) config.trace("hello") assert len(l) == 1 assert l[0] == "hello [config]\n" def test_config_getvalue_honours_conftest(self, testdir): testdir.makepyfile(conftest="x=1") testdir.mkdir("sub").join("conftest.py").write("x=2 ; y = 3") config = testdir.parseconfig() o = testdir.tmpdir assert config.getvalue("x") == 1 assert config.getvalue("x", o.join('sub')) == 2 pytest.raises(KeyError, "config.getvalue('y')") config = testdir.parseconfigure(str(o.join('sub'))) assert config.getvalue("x") == 2 assert config.getvalue("y") == 3 assert config.getvalue("x", o) == 1 pytest.raises(KeyError, 'config.getvalue("y", o)') def test_config_getoption(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): parser.addoption("--hello", "-X", dest="hello") """) config = testdir.parseconfig("--hello=this") for x in ("hello", "--hello", "-X"): assert config.getoption(x) == "this" pytest.raises(ValueError, "config.getoption('qweqwe')") @pytest.mark.skipif('sys.version_info[:2] not in [(2, 6), (2, 7)]') def test_config_getoption_unicode(self, testdir): testdir.makeconftest(""" from __future__ import unicode_literals def pytest_addoption(parser): parser.addoption('--hello', type='string') """) config = testdir.parseconfig('--hello=this') assert config.getoption('hello') == 'this' def test_config_getvalueorskip(self, testdir): config = testdir.parseconfig() pytest.raises(pytest.skip.Exception, "config.getvalueorskip('hello')") verbose = 
config.getvalueorskip("verbose") assert verbose == config.option.verbose config.option.hello = None try: config.getvalueorskip('hello') except KeyboardInterrupt: raise except: excinfo = py.code.ExceptionInfo() frame = excinfo.traceback[-2].frame assert frame.code.name == "getvalueorskip" assert frame.eval("__tracebackhide__") def test_config_overwrite(self, testdir): o = testdir.tmpdir o.ensure("conftest.py").write("x=1") config = testdir.parseconfig(str(o)) assert config.getvalue('x') == 1 config.option.x = 2 assert config.getvalue('x') == 2 config = testdir.parseconfig([str(o)]) assert config.getvalue('x') == 1 def test_getconftest_pathlist(self, testdir, tmpdir): somepath = tmpdir.join("x", "y", "z") p = tmpdir.join("conftest.py") p.write("pathlist = ['.', %r]" % str(somepath)) config = testdir.parseconfigure(p) assert config._getconftest_pathlist('notexist') is None pl = config._getconftest_pathlist('pathlist') print(pl) assert len(pl) == 2 assert pl[0] == tmpdir assert pl[1] == somepath def test_addini(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): parser.addini("myname", "my new ini value") """) testdir.makeini(""" [pytest] myname=hello """) config = testdir.parseconfig() val = config.getini("myname") assert val == "hello" pytest.raises(ValueError, config.getini, 'other') def test_addini_pathlist(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): parser.addini("paths", "my new ini value", type="pathlist") parser.addini("abc", "abc value") """) p = testdir.makeini(""" [pytest] paths=hello world/sub.py """) config = testdir.parseconfig() l = config.getini("paths") assert len(l) == 2 assert l[0] == p.dirpath('hello') assert l[1] == p.dirpath('world/sub.py') pytest.raises(ValueError, config.getini, 'other') def test_addini_args(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): parser.addini("args", "new args", type="args") parser.addini("a2", "", "args", default="1 2 3".split()) """) 
testdir.makeini(""" [pytest] args=123 "123 hello" "this" """) config = testdir.parseconfig() l = config.getini("args") assert len(l) == 3 assert l == ["123", "123 hello", "this"] l = config.getini("a2") assert l == list("123") def test_addini_linelist(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): parser.addini("xy", "", type="linelist") parser.addini("a2", "", "linelist") """) testdir.makeini(""" [pytest] xy= 123 345 second line """) config = testdir.parseconfig() l = config.getini("xy") assert len(l) == 2 assert l == ["123 345", "second line"] l = config.getini("a2") assert l == [] def test_addinivalue_line_existing(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): parser.addini("xy", "", type="linelist") """) testdir.makeini(""" [pytest] xy= 123 """) config = testdir.parseconfig() l = config.getini("xy") assert len(l) == 1 assert l == ["123"] config.addinivalue_line("xy", "456") l = config.getini("xy") assert len(l) == 2 assert l == ["123", "456"] def test_addinivalue_line_new(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): parser.addini("xy", "", type="linelist") """) config = testdir.parseconfig() assert not config.getini("xy") config.addinivalue_line("xy", "456") l = config.getini("xy") assert len(l) == 1 assert l == ["456"] config.addinivalue_line("xy", "123") l = config.getini("xy") assert len(l) == 2 assert l == ["456", "123"] def test_options_on_small_file_do_not_blow_up(testdir): def runfiletest(opts): reprec = testdir.inline_run(*opts) passed, skipped, failed = reprec.countoutcomes() assert failed == 2 assert skipped == passed == 0 path = testdir.makepyfile(""" def test_f1(): assert 0 def test_f2(): assert 0 """) for opts in ([], ['-l'], ['-s'], ['--tb=no'], ['--tb=short'], ['--tb=long'], ['--fulltrace'], ['--nomagic'], ['--traceconfig'], ['-v'], ['-v', '-v']): runfiletest(opts + [path]) def test_preparse_ordering_with_setuptools(testdir, monkeypatch): pkg_resources = 
py.test.importorskip("pkg_resources") def my_iter(name): assert name == "pytest11" class EntryPoint: name = "mytestplugin" class dist: pass def load(self): class PseudoPlugin: x = 42 return PseudoPlugin() return iter([EntryPoint()]) monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) testdir.makeconftest(""" pytest_plugins = "mytestplugin", """) monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin") config = testdir.parseconfig() plugin = config.pluginmanager.getplugin("mytestplugin") assert plugin.x == 42 def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch): pkg_resources = py.test.importorskip("pkg_resources") def my_iter(name): assert name == "pytest11" class EntryPoint: name = "mytestplugin" def load(self): assert 0, "should not arrive here" return iter([EntryPoint()]) monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) config = testdir.parseconfig("-p", "no:mytestplugin") plugin = config.pluginmanager.getplugin("mytestplugin") assert plugin == -1 def test_cmdline_processargs_simple(testdir): testdir.makeconftest(""" def pytest_cmdline_preparse(args): args.append("-h") """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*pytest*", "*-h*", ]) @pytest.mark.skipif("sys.platform == 'win32'") def test_toolongargs_issue224(testdir): result = testdir.runpytest("-m", "hello" * 500) assert result.ret == 0 def test_notify_exception(testdir, capfd): config = testdir.parseconfig() excinfo = pytest.raises(ValueError, "raise ValueError(1)") config.notify_exception(excinfo) out, err = capfd.readouterr() assert "ValueError" in err class A: def pytest_internalerror(self, excrepr): return True config.pluginmanager.register(A()) config.notify_exception(excinfo) out, err = capfd.readouterr() assert not err def test_load_initial_conftest_last_ordering(testdir): from _pytest.config import get_plugin_manager pm = get_plugin_manager() class My: def pytest_load_initial_conftests(self): pass m = My() pm.register(m) l = 
pm.listattr("pytest_load_initial_conftests") assert l[-1].__module__ == "_pytest.capture" assert l[-2] == m.pytest_load_initial_conftests assert l[-3].__module__ == "_pytest.config" pytest-2.5.1/testing/test_parseopt.py0000664000175000017500000003046712254002202017361 0ustar hpkhpk00000000000000from __future__ import with_statement import sys import os import py, pytest from _pytest import config as parseopt @pytest.fixture def parser(): return parseopt.Parser() class TestParser: def test_no_help_by_default(self, capsys): parser = parseopt.Parser(usage="xyz") pytest.raises(SystemExit, lambda: parser.parse(["-h"])) out, err = capsys.readouterr() assert err.find("error: unrecognized arguments") != -1 def test_argument(self): with pytest.raises(parseopt.ArgumentError): # need a short or long option argument = parseopt.Argument() argument = parseopt.Argument('-t') assert argument._short_opts == ['-t'] assert argument._long_opts == [] assert argument.dest == 't' argument = parseopt.Argument('-t', '--test') assert argument._short_opts == ['-t'] assert argument._long_opts == ['--test'] assert argument.dest == 'test' argument = parseopt.Argument('-t', '--test', dest='abc') assert argument.dest == 'abc' def test_argument_type(self): argument = parseopt.Argument('-t', dest='abc', type='int') assert argument.type is int argument = parseopt.Argument('-t', dest='abc', type='string') assert argument.type is str argument = parseopt.Argument('-t', dest='abc', type=float) assert argument.type is float with pytest.raises(KeyError): argument = parseopt.Argument('-t', dest='abc', type='choice') argument = parseopt.Argument('-t', dest='abc', type='choice', choices=['red', 'blue']) assert argument.type is str def test_argument_processopt(self): argument = parseopt.Argument('-t', type=int) argument.default = 42 argument.dest = 'abc' res = argument.attrs() assert res['default'] == 42 assert res['dest'] == 'abc' def test_group_add_and_get(self, parser): group = parser.getgroup("hello", 
description="desc") assert group.name == "hello" assert group.description == "desc" def test_getgroup_simple(self, parser): group = parser.getgroup("hello", description="desc") assert group.name == "hello" assert group.description == "desc" group2 = parser.getgroup("hello") assert group2 is group def test_group_ordering(self, parser): parser.getgroup("1") parser.getgroup("2") parser.getgroup("3", after="1") groups = parser._groups groups_names = [x.name for x in groups] assert groups_names == list("132") def test_group_addoption(self): group = parseopt.OptionGroup("hello") group.addoption("--option1", action="store_true") assert len(group.options) == 1 assert isinstance(group.options[0], parseopt.Argument) def test_group_shortopt_lowercase(self, parser): group = parser.getgroup("hello") pytest.raises(ValueError, """ group.addoption("-x", action="store_true") """) assert len(group.options) == 0 group._addoption("-x", action="store_true") assert len(group.options) == 1 def test_parser_addoption(self, parser): group = parser.getgroup("custom options") assert len(group.options) == 0 group.addoption("--option1", action="store_true") assert len(group.options) == 1 def test_parse(self, parser): parser.addoption("--hello", dest="hello", action="store") args = parser.parse(['--hello', 'world']) assert args.hello == "world" assert not getattr(args, parseopt.FILE_OR_DIR) def test_parse2(self, parser): args = parser.parse([py.path.local()]) assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local() def test_parse_known_args(self, parser): parser.parse_known_args([py.path.local()]) parser.addoption("--hello", action="store_true") ns = parser.parse_known_args(["x", "--y", "--hello", "this"]) assert ns.hello def test_parse_will_set_default(self, parser): parser.addoption("--hello", dest="hello", default="x", action="store") option = parser.parse([]) assert option.hello == "x" del option.hello parser.parse_setoption([], option) assert option.hello == "x" def 
test_parse_setoption(self, parser): parser.addoption("--hello", dest="hello", action="store") parser.addoption("--world", dest="world", default=42) class A: pass option = A() args = parser.parse_setoption(['--hello', 'world'], option) assert option.hello == "world" assert option.world == 42 assert not args def test_parse_special_destination(self, parser): parser.addoption("--ultimate-answer", type=int) args = parser.parse(['--ultimate-answer', '42']) assert args.ultimate_answer == 42 def test_parse_split_positional_arguments(self, parser): parser.addoption("-R", action='store_true') parser.addoption("-S", action='store_false') args = parser.parse(['-R', '4', '2', '-S']) assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] args = parser.parse(['-R', '-S', '4', '2', '-R']) assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] assert args.R == True assert args.S == False args = parser.parse(['-R', '4', '-S', '2']) assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] assert args.R == True assert args.S == False def test_parse_defaultgetter(self): def defaultget(option): if not hasattr(option, 'type'): return if option.type is int: option.default = 42 elif option.type is str: option.default = "world" parser = parseopt.Parser(processopt=defaultget) parser.addoption("--this", dest="this", type="int", action="store") parser.addoption("--hello", dest="hello", type="string", action="store") parser.addoption("--no", dest="no", action="store_true") option = parser.parse([]) assert option.hello == "world" assert option.this == 42 assert option.no is False @pytest.mark.skipif("sys.version_info < (2,5)") def test_drop_short_helper(self): parser = py.std.argparse.ArgumentParser(formatter_class=parseopt.DropShorterLongHelpFormatter) parser.add_argument('-t', '--twoword', '--duo', '--two-word', '--two', help='foo').map_long_option = {'two': 'two-word'} # throws error on --deux only! 
parser.add_argument('-d', '--deuxmots', '--deux-mots', action='store_true', help='foo').map_long_option = {'deux': 'deux-mots'} parser.add_argument('-s', action='store_true', help='single short') parser.add_argument('--abc', '-a', action='store_true', help='bar') parser.add_argument('--klm', '-k', '--kl-m', action='store_true', help='bar') parser.add_argument('-P', '--pq-r', '-p', '--pqr', action='store_true', help='bar') parser.add_argument('--zwei-wort', '--zweiwort', '--zweiwort', action='store_true', help='bar') parser.add_argument('-x', '--exit-on-first', '--exitfirst', action='store_true', help='spam').map_long_option = {'exitfirst': 'exit-on-first'} parser.add_argument('files_and_dirs', nargs='*') args = parser.parse_args(['-k', '--duo', 'hallo', '--exitfirst']) assert args.twoword == 'hallo' assert args.klm is True assert args.zwei_wort is False assert args.exit_on_first is True assert args.s is False args = parser.parse_args(['--deux-mots']) with pytest.raises(AttributeError): assert args.deux_mots is True assert args.deuxmots is True args = parser.parse_args(['file', 'dir']) assert '|'.join(args.files_and_dirs) == 'file|dir' def test_drop_short_0(self, parser): parser.addoption('--funcarg', '--func-arg', action='store_true') parser.addoption('--abc-def', '--abc-def', action='store_true') parser.addoption('--klm-hij', action='store_true') args = parser.parse(['--funcarg', '--k']) assert args.funcarg is True assert args.abc_def is False assert args.klm_hij is True @pytest.mark.skipif("sys.version_info < (2,5)") def test_drop_short_2(self, parser): parser.addoption('--func-arg', '--doit', action='store_true') args = parser.parse(['--doit']) assert args.func_arg is True @pytest.mark.skipif("sys.version_info < (2,5)") def test_drop_short_3(self, parser): parser.addoption('--func-arg', '--funcarg', '--doit', action='store_true') args = parser.parse(['abcd']) assert args.func_arg is False assert args.file_or_dir == ['abcd'] @pytest.mark.skipif("sys.version_info 
< (2,5)") def test_drop_short_help0(self, parser, capsys): parser.addoption('--func-args', '--doit', help = 'foo', action='store_true') parser.parse([]) help = parser.optparser.format_help() assert '--func-args, --doit foo' in help # testing would be more helpful with all help generated @pytest.mark.skipif("sys.version_info < (2,5)") def test_drop_short_help1(self, parser, capsys): group = parser.getgroup("general") group.addoption('--doit', '--func-args', action='store_true', help='foo') group._addoption("-h", "--help", action="store_true", dest="help", help="show help message and configuration info") parser.parse(['-h']) help = parser.optparser.format_help() assert '-doit, --func-args foo' in help @pytest.mark.skipif("sys.version_info < (2,5)") def test_addoption_parser_epilog(testdir): testdir.makeconftest(""" def pytest_addoption(parser): parser.hints.append("hello world") parser.hints.append("from me too") """) result = testdir.runpytest('--help') #assert result.ret != 0 result.stdout.fnmatch_lines(["hint: hello world", "hint: from me too"]) @pytest.mark.skipif("sys.version_info < (2,6)") def test_argcomplete(testdir, monkeypatch): if not py.path.local.sysfind('bash'): pytest.skip("bash not available") script = str(testdir.tmpdir.join("test_argcomplete")) pytest_bin = sys.argv[0] if "py.test" not in os.path.basename(pytest_bin): pytest.skip("need to be run with py.test executable, not %s" %(pytest_bin,)) with open(str(script), 'w') as fp: # redirect output from argcomplete to stdin and stderr is not trivial # http://stackoverflow.com/q/12589419/1307905 # so we use bash fp.write('COMP_WORDBREAKS="$COMP_WORDBREAKS" %s 8>&1 9>&2' % pytest_bin) # alternative would be exteneded Testdir.{run(),_run(),popen()} to be able # to handle a keyword argument env that replaces os.environ in popen or # extends the copy, advantage: could not forget to restore monkeypatch.setenv('_ARGCOMPLETE', "1") monkeypatch.setenv('_ARGCOMPLETE_IFS',"\x0b") 
monkeypatch.setenv('COMP_WORDBREAKS', ' \\t\\n"\\\'><=;|&(:') arg = '--fu' monkeypatch.setenv('COMP_LINE', "py.test " + arg) monkeypatch.setenv('COMP_POINT', str(len("py.test " + arg))) result = testdir.run('bash', str(script), arg) if result.ret == 255: # argcomplete not found pytest.skip("argcomplete not available") elif not result.stdout.str(): pytest.skip("bash provided no output, argcomplete not available?") else: if py.std.sys.version_info < (2,7): result.stdout.lines = result.stdout.lines[0].split('\x0b') result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"]) else: result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"]) if py.std.sys.version_info < (2,7): return os.mkdir('test_argcomplete.d') arg = 'test_argc' monkeypatch.setenv('COMP_LINE', "py.test " + arg) monkeypatch.setenv('COMP_POINT', str(len('py.test ' + arg))) result = testdir.run('bash', str(script), arg) result.stdout.fnmatch_lines(["test_argcomplete", "test_argcomplete.d/"]) pytest-2.5.1/testing/test_doctest.py0000664000175000017500000002173012254002202017162 0ustar hpkhpk00000000000000from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile import py, pytest import pdb xfail_if_pdbpp_installed = pytest.mark.xfail(hasattr(pdb, "__author__"), reason="doctest/pdbpp problem: https://bitbucket.org/antocuni/pdb/issue/24/doctests-fail-when-pdbpp-is-installed", run=False) class TestDoctests: def test_collect_testtextfile(self, testdir): w = testdir.maketxtfile(whatever="") checkfile = testdir.maketxtfile(test_something=""" alskdjalsdk >>> i = 5 >>> i-1 4 """) for x in (testdir.tmpdir, checkfile): #print "checking that %s returns custom items" % (x,) items, reprec = testdir.inline_genitems(x) assert len(items) == 1 assert isinstance(items[0], DoctestTextfile) items, reprec = testdir.inline_genitems(w) assert len(items) == 1 def test_collect_module_empty(self, testdir): path = testdir.makepyfile(whatever="#") for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, 
'--doctest-modules') assert len(items) == 0 def test_collect_module_single_modulelevel_doctest(self, testdir): path = testdir.makepyfile(whatever='""">>> pass"""') for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, '--doctest-modules') assert len(items) == 1 assert isinstance(items[0], DoctestItem) assert isinstance(items[0].parent, DoctestModule) def test_collect_module_two_doctest_one_modulelevel(self, testdir): path = testdir.makepyfile(whatever=""" '>>> x = None' def my_func(): ">>> magic = 42 " """) for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, '--doctest-modules') assert len(items) == 2 assert isinstance(items[0], DoctestItem) assert isinstance(items[1], DoctestItem) assert isinstance(items[0].parent, DoctestModule) assert items[0].parent is items[1].parent def test_collect_module_two_doctest_no_modulelevel(self, testdir): path = testdir.makepyfile(whatever=""" '# Empty' def my_func(): ">>> magic = 42 " def unuseful(): ''' # This is a function # >>> # it doesn't have any doctest ''' def another(): ''' # This is another function >>> import os # this one does have a doctest ''' """) for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, '--doctest-modules') assert len(items) == 2 assert isinstance(items[0], DoctestItem) assert isinstance(items[1], DoctestItem) assert isinstance(items[0].parent, DoctestModule) assert items[0].parent is items[1].parent def test_simple_doctestfile(self, testdir): p = testdir.maketxtfile(test_doc=""" >>> x = 1 >>> x == 1 False """) reprec = testdir.inline_run(p, ) reprec.assertoutcome(failed=1) def test_new_pattern(self, testdir): p = testdir.maketxtfile(xdoc =""" >>> x = 1 >>> x == 1 False """) reprec = testdir.inline_run(p, "--doctest-glob=x*.txt") reprec.assertoutcome(failed=1) def test_doctest_unexpected_exception(self, testdir): testdir.maketxtfile(""" >>> i = 0 >>> 0 / i 2 """) result = testdir.runpytest("--doctest-modules") 
result.stdout.fnmatch_lines([ "*unexpected_exception*", "*>>> i = 0*", "*>>> 0 / i*", "*UNEXPECTED*ZeroDivision*", ]) def test_doctest_linedata_missing(self, testdir): testdir.tmpdir.join('hello.py').write(py.code.Source(""" class Fun(object): @property def test(self): ''' >>> a = 1 >>> 1/0 ''' """)) result = testdir.runpytest("--doctest-modules") result.stdout.fnmatch_lines([ "*hello*", "*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*", "*1/0*", "*UNEXPECTED*ZeroDivision*", "*1 failed*", ]) def test_doctest_unex_importerror(self, testdir): testdir.tmpdir.join("hello.py").write(py.code.Source(""" import asdalsdkjaslkdjasd """)) testdir.maketxtfile(""" >>> import hello >>> """) result = testdir.runpytest("--doctest-modules") result.stdout.fnmatch_lines([ "*>>> import hello", "*UNEXPECTED*ImportError*", "*import asdals*", ]) def test_doctestmodule(self, testdir): p = testdir.makepyfile(""" ''' >>> x = 1 >>> x == 1 False ''' """) reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(failed=1) @xfail_if_pdbpp_installed def test_doctestmodule_external_and_issue116(self, testdir): p = testdir.mkpydir("hello") p.join("__init__.py").write(py.code.Source(""" def somefunc(): ''' >>> i = 0 >>> i + 1 2 ''' """)) result = testdir.runpytest(p, "--doctest-modules") result.stdout.fnmatch_lines([ '004 *>>> i = 0', '005 *>>> i + 1', '*Expected:', "* 2", "*Got:", "* 1", "*:5: DocTestFailure" ]) def test_txtfile_failing(self, testdir): p = testdir.maketxtfile(""" >>> i = 0 >>> i + 1 2 """) result = testdir.runpytest(p, "-s") result.stdout.fnmatch_lines([ '001 >>> i = 0', '002 >>> i + 1', 'Expected:', " 2", "Got:", " 1", "*test_txtfile_failing.txt:2: DocTestFailure" ]) @xfail_if_pdbpp_installed def test_txtfile_with_fixtures(self, testdir): p = testdir.maketxtfile(""" >>> dir = getfixture('tmpdir') >>> type(dir).__name__ 'LocalPath' """) reprec = testdir.inline_run(p, ) reprec.assertoutcome(passed=1) @xfail_if_pdbpp_installed def 
test_txtfile_with_usefixtures_in_ini(self, testdir): testdir.makeini(""" [pytest] usefixtures = myfixture """) testdir.makeconftest(""" import pytest @pytest.fixture def myfixture(monkeypatch): monkeypatch.setenv("HELLO", "WORLD") """) p = testdir.maketxtfile(""" >>> import os >>> os.environ["HELLO"] 'WORLD' """) reprec = testdir.inline_run(p, ) reprec.assertoutcome(passed=1) @xfail_if_pdbpp_installed def test_doctestmodule_with_fixtures(self, testdir): p = testdir.makepyfile(""" ''' >>> dir = getfixture('tmpdir') >>> type(dir).__name__ 'LocalPath' ''' """) reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(passed=1) @xfail_if_pdbpp_installed def test_doctestmodule_three_tests(self, testdir): p = testdir.makepyfile(""" ''' >>> dir = getfixture('tmpdir') >>> type(dir).__name__ 'LocalPath' ''' def my_func(): ''' >>> magic = 42 >>> magic - 42 0 ''' def unuseful(): pass def another(): ''' >>> import os >>> os is os True ''' """) reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(passed=3) @xfail_if_pdbpp_installed def test_doctestmodule_two_tests_one_fail(self, testdir): p = testdir.makepyfile(""" class MyClass: def bad_meth(self): ''' >>> magic = 42 >>> magic 0 ''' def nice_meth(self): ''' >>> magic = 42 >>> magic - 42 0 ''' """) reprec = testdir.inline_run(p, "--doctest-modules") reprec.assertoutcome(failed=1, passed=1) pytest-2.5.1/testing/test_skipping.py0000664000175000017500000004470612254002202017351 0ustar hpkhpk00000000000000import pytest import sys from _pytest.skipping import MarkEvaluator, folded_skips, pytest_runtest_setup from _pytest.runner import runtestprotocol class TestEvaluator: def test_no_marker(self, testdir): item = testdir.getitem("def test_func(): pass") evalskipif = MarkEvaluator(item, 'skipif') assert not evalskipif assert not evalskipif.istrue() def test_marked_no_args(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.xyz def test_func(): pass """) ev = MarkEvaluator(item, 
'xyz') assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "" assert not ev.get("run", False) def test_marked_one_arg(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.xyz("hasattr(os, 'sep')") def test_func(): pass """) ev = MarkEvaluator(item, 'xyz') assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: hasattr(os, 'sep')" @pytest.mark.skipif('sys.version_info[0] >= 3') def test_marked_one_arg_unicode(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.xyz(u"hasattr(os, 'sep')") def test_func(): pass """) ev = MarkEvaluator(item, 'xyz') assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: hasattr(os, 'sep')" def test_marked_one_arg_with_reason(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world") def test_func(): pass """) ev = MarkEvaluator(item, 'xyz') assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "hello world" assert ev.get("attr") == 2 def test_marked_one_arg_twice(self, testdir): lines = [ '''@pytest.mark.skipif("not hasattr(os, 'murks')")''', '''@pytest.mark.skipif("hasattr(os, 'murks')")''' ] for i in range(0, 2): item = testdir.getitem(""" import pytest %s %s def test_func(): pass """ % (lines[i], lines[(i+1) %2])) ev = MarkEvaluator(item, 'skipif') assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: not hasattr(os, 'murks')" def test_marked_one_arg_twice2(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.skipif("hasattr(os, 'murks')") @pytest.mark.skipif("not hasattr(os, 'murks')") def test_func(): pass """) ev = MarkEvaluator(item, 'skipif') assert ev assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: not hasattr(os, 'murks')" def test_marked_skip_with_not_string(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.skipif(False) def test_func(): pass """) 
ev = MarkEvaluator(item, 'skipif') exc = pytest.raises(pytest.fail.Exception, ev.istrue) assert """Failed: you need to specify reason=STRING when using booleans as conditions.""" in exc.value.msg def test_skipif_class(self, testdir): item, = testdir.getitems(""" import pytest class TestClass: pytestmark = pytest.mark.skipif("config._hackxyz") def test_func(self): pass """) item.config._hackxyz = 3 ev = MarkEvaluator(item, 'skipif') assert ev.istrue() expl = ev.getexplanation() assert expl == "condition: config._hackxyz" class TestXFail: def test_xfail_simple(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.xfail def test_func(): assert 0 """) reports = runtestprotocol(item, log=False) assert len(reports) == 3 callreport = reports[1] assert callreport.skipped assert callreport.wasxfail == "" def test_xfail_xpassed(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.xfail def test_func(): assert 1 """) reports = runtestprotocol(item, log=False) assert len(reports) == 3 callreport = reports[1] assert callreport.failed assert callreport.wasxfail == "" def test_xfail_run_anyway(self, testdir): testdir.makepyfile(""" import pytest @pytest.mark.xfail def test_func(): assert 0 def test_func2(): pytest.xfail("hello") """) result = testdir.runpytest("--runxfail") result.stdout.fnmatch_lines([ "*def test_func():*", "*assert 0*", "*1 failed*1 pass*", ]) def test_xfail_evalfalse_but_fails(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.xfail('False') def test_func(): assert 0 """) reports = runtestprotocol(item, log=False) callreport = reports[1] assert callreport.failed assert not hasattr(callreport, "wasxfail") assert 'xfail' in callreport.keywords def test_xfail_not_report_default(self, testdir): p = testdir.makepyfile(test_one=""" import pytest @pytest.mark.xfail def test_this(): assert 0 """) testdir.runpytest(p, '-v') #result.stdout.fnmatch_lines([ # "*HINT*use*-r*" #]) def 
test_xfail_not_run_xfail_reporting(self, testdir): p = testdir.makepyfile(test_one=""" import pytest @pytest.mark.xfail(run=False, reason="noway") def test_this(): assert 0 @pytest.mark.xfail("True", run=False) def test_this_true(): assert 0 @pytest.mark.xfail("False", run=False, reason="huh") def test_this_false(): assert 1 """) result = testdir.runpytest(p, '--report=xfailed', ) result.stdout.fnmatch_lines([ "*test_one*test_this*", "*NOTRUN*noway", "*test_one*test_this_true*", "*NOTRUN*condition:*True*", "*1 passed*", ]) def test_xfail_not_run_no_setup_run(self, testdir): p = testdir.makepyfile(test_one=""" import pytest @pytest.mark.xfail(run=False, reason="hello") def test_this(): assert 0 def setup_module(mod): raise ValueError(42) """) result = testdir.runpytest(p, '--report=xfailed', ) result.stdout.fnmatch_lines([ "*test_one*test_this*", "*NOTRUN*hello", "*1 xfailed*", ]) def test_xfail_xpass(self, testdir): p = testdir.makepyfile(test_one=""" import pytest @pytest.mark.xfail def test_that(): assert 1 """) result = testdir.runpytest(p, '-rX') result.stdout.fnmatch_lines([ "*XPASS*test_that*", "*1 xpassed*" ]) assert result.ret == 0 def test_xfail_imperative(self, testdir): p = testdir.makepyfile(""" import pytest def test_this(): pytest.xfail("hello") """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*1 xfailed*", ]) result = testdir.runpytest(p, "-rx") result.stdout.fnmatch_lines([ "*XFAIL*test_this*", "*reason:*hello*", ]) result = testdir.runpytest(p, "--runxfail") result.stdout.fnmatch_lines("*1 pass*") def test_xfail_imperative_in_setup_function(self, testdir): p = testdir.makepyfile(""" import pytest def setup_function(function): pytest.xfail("hello") def test_this(): assert 0 """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*1 xfailed*", ]) result = testdir.runpytest(p, "-rx") result.stdout.fnmatch_lines([ "*XFAIL*test_this*", "*reason:*hello*", ]) result = testdir.runpytest(p, "--runxfail") 
result.stdout.fnmatch_lines(""" *def test_this* *1 fail* """) def xtest_dynamic_xfail_set_during_setup(self, testdir): p = testdir.makepyfile(""" import pytest def setup_function(function): pytest.mark.xfail(function) def test_this(): assert 0 def test_that(): assert 1 """) result = testdir.runpytest(p, '-rxX') result.stdout.fnmatch_lines([ "*XFAIL*test_this*", "*XPASS*test_that*", ]) def test_dynamic_xfail_no_run(self, testdir): p = testdir.makepyfile(""" import pytest def pytest_funcarg__arg(request): request.applymarker(pytest.mark.xfail(run=False)) def test_this(arg): assert 0 """) result = testdir.runpytest(p, '-rxX') result.stdout.fnmatch_lines([ "*XFAIL*test_this*", "*NOTRUN*", ]) def test_dynamic_xfail_set_during_funcarg_setup(self, testdir): p = testdir.makepyfile(""" import pytest def pytest_funcarg__arg(request): request.applymarker(pytest.mark.xfail) def test_this2(arg): assert 0 """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*1 xfailed*", ]) class TestXFailwithSetupTeardown: def test_failing_setup_issue9(self, testdir): testdir.makepyfile(""" import pytest def setup_function(func): assert 0 @pytest.mark.xfail def test_func(): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*1 xfail*", ]) def test_failing_teardown_issue9(self, testdir): testdir.makepyfile(""" import pytest def teardown_function(func): assert 0 @pytest.mark.xfail def test_func(): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*1 xfail*", ]) class TestSkipif: def test_skipif_conditional(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.skipif("hasattr(os, 'sep')") def test_func(): pass """) # noqa x = pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item)) assert x.value.msg == "condition: hasattr(os, 'sep')" def test_skipif_reporting(self, testdir): p = testdir.makepyfile(""" import pytest @pytest.mark.skipif("hasattr(sys, 'platform')") def test_that(): assert 0 """) result = 
testdir.runpytest(p, '-s', '-rs') result.stdout.fnmatch_lines([ "*SKIP*1*platform*", "*1 skipped*" ]) assert result.ret == 0 def test_skip_not_report_default(testdir): p = testdir.makepyfile(test_one=""" import pytest def test_this(): pytest.skip("hello") """) result = testdir.runpytest(p, '-v') result.stdout.fnmatch_lines([ #"*HINT*use*-r*", "*1 skipped*", ]) def test_skipif_class(testdir): p = testdir.makepyfile(""" import pytest class TestClass: pytestmark = pytest.mark.skipif("True") def test_that(self): assert 0 def test_though(self): assert 0 """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*2 skipped*" ]) def test_skip_reasons_folding(): path = 'xyz' lineno = 3 message = "justso" longrepr = (path, lineno, message) class X: pass ev1 = X() ev1.when = "execute" ev1.skipped = True ev1.longrepr = longrepr ev2 = X() ev2.longrepr = longrepr ev2.skipped = True l = folded_skips([ev1, ev2]) assert len(l) == 1 num, fspath, lineno, reason = l[0] assert num == 2 assert fspath == path assert lineno == lineno assert reason == message def test_skipped_reasons_functional(testdir): testdir.makepyfile( test_one=""" from conftest import doskip def setup_function(func): doskip() def test_func(): pass class TestClass: def test_method(self): doskip() """, test_two = """ from conftest import doskip doskip() """, conftest = """ import pytest def doskip(): pytest.skip('test') """ ) result = testdir.runpytest('--report=skipped') result.stdout.fnmatch_lines([ "*SKIP*3*conftest.py:3: test", ]) assert result.ret == 0 def test_reportchars(testdir): testdir.makepyfile(""" import pytest def test_1(): assert 0 @pytest.mark.xfail def test_2(): assert 0 @pytest.mark.xfail def test_3(): pass def test_4(): pytest.skip("four") """) result = testdir.runpytest("-rfxXs") result.stdout.fnmatch_lines([ "FAIL*test_1*", "XFAIL*test_2*", "XPASS*test_3*", "SKIP*four*", ]) def test_reportchars_error(testdir): testdir.makepyfile( conftest=""" def pytest_runtest_teardown(): assert 0 """, 
test_simple=""" def test_foo(): pass """) result = testdir.runpytest('-rE') result.stdout.fnmatch_lines([ 'ERROR*test_foo*', ]) @pytest.mark.xfail("hasattr(sys, 'pypy_version_info')") def test_errors_in_xfail_skip_expressions(testdir): testdir.makepyfile(""" import pytest @pytest.mark.skipif("asd") def test_nameerror(): pass @pytest.mark.xfail("syntax error") def test_syntax(): pass def test_func(): pass """) result = testdir.runpytest() markline = " ^" if sys.platform.startswith("java"): # XXX report this to java markline = "*" + markline[8:] result.stdout.fnmatch_lines([ "*ERROR*test_nameerror*", "*evaluating*skipif*expression*", "*asd*", "*ERROR*test_syntax*", "*evaluating*xfail*expression*", " syntax error", markline, "SyntaxError: invalid syntax", "*1 pass*2 error*", ]) def test_xfail_skipif_with_globals(testdir): testdir.makepyfile(""" import pytest x = 3 @pytest.mark.skipif("x == 3") def test_skip1(): pass @pytest.mark.xfail("x == 3") def test_boolean(): assert 0 """) result = testdir.runpytest("-rsx") result.stdout.fnmatch_lines([ "*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*", ]) def test_direct_gives_error(testdir): testdir.makepyfile(""" import pytest @pytest.mark.skipif(True) def test_skip1(): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*1 error*", ]) def test_default_markers(testdir): result = testdir.runpytest("--markers") result.stdout.fnmatch_lines([ "*skipif(*condition)*skip*", "*xfail(*condition, reason=None, run=True)*expected failure*", ]) def test_xfail_test_setup_exception(testdir): testdir.makeconftest(""" def pytest_runtest_setup(): 0 / 0 """) p = testdir.makepyfile(""" import pytest @pytest.mark.xfail def test_func(): assert 0 """) result = testdir.runpytest(p) assert result.ret == 0 assert 'xfailed' in result.stdout.str() assert 'xpassed' not in result.stdout.str() def test_imperativeskip_on_xfail_test(testdir): testdir.makepyfile(""" import pytest @pytest.mark.xfail def test_that_fails(): assert 0 
@pytest.mark.skipif("True") def test_hello(): pass """) testdir.makeconftest(""" import pytest def pytest_runtest_setup(item): pytest.skip("abc") """) result = testdir.runpytest("-rsxX") result.stdout.fnmatch_lines_random(""" *SKIP*abc* *SKIP*condition: True* *2 skipped* """) class TestBooleanCondition: def test_skipif(self, testdir): testdir.makepyfile(""" import pytest @pytest.mark.skipif(True, reason="True123") def test_func1(): pass @pytest.mark.skipif(False, reason="True123") def test_func2(): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines(""" *1 passed*1 skipped* """) def test_skipif_noreason(self, testdir): testdir.makepyfile(""" import pytest @pytest.mark.skipif(True) def test_func(): pass """) result = testdir.runpytest("-rs") result.stdout.fnmatch_lines(""" *1 error* """) def test_xfail(self, testdir): testdir.makepyfile(""" import pytest @pytest.mark.xfail(True, reason="True123") def test_func(): assert 0 """) result = testdir.runpytest("-rxs") result.stdout.fnmatch_lines(""" *XFAIL* *True123* *1 xfail* """) pytest-2.5.1/testing/test_junitxml.py0000664000175000017500000004251512254002202017373 0ustar hpkhpk00000000000000# -*- coding: utf-8 -*- from xml.dom import minidom import py, sys, os from _pytest.junitxml import LogXML def runandparse(testdir, *args): resultpath = testdir.tmpdir.join("junit.xml") result = testdir.runpytest("--junitxml=%s" % resultpath, *args) xmldoc = minidom.parse(str(resultpath)) return result, xmldoc def assert_attr(node, **kwargs): __tracebackhide__ = True for name, expected in kwargs.items(): anode = node.getAttributeNode(name) assert anode, "node %r has no attribute %r" %(node, name) val = anode.value if val != str(expected): py.test.fail("%r != %r" %(str(val), str(expected))) class TestPython: def test_summing_simple(self, testdir): testdir.makepyfile(""" import pytest def test_pass(): pass def test_fail(): assert 0 def test_skip(): pytest.skip("") @pytest.mark.xfail def test_xfail(): assert 0 
@pytest.mark.xfail def test_xpass(): assert 1 """) result, dom = runandparse(testdir) assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, name="pytest", errors=0, failures=1, skips=3, tests=2) def test_timing_function(self, testdir): testdir.makepyfile(""" import time, pytest def test_sleep(): time.sleep(0.01) """) result, dom = runandparse(testdir) node = dom.getElementsByTagName("testsuite")[0] tnode = node.getElementsByTagName("testcase")[0] val = tnode.getAttributeNode("time").value assert float(val) >= 0.001 def test_setup_error(self, testdir): testdir.makepyfile(""" def pytest_funcarg__arg(request): raise ValueError() def test_function(arg): pass """) result, dom = runandparse(testdir) assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, errors=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, classname="test_setup_error", name="test_function") fnode = tnode.getElementsByTagName("error")[0] assert_attr(fnode, message="test setup failure") assert "ValueError" in fnode.toxml() def test_skip_contains_name_reason(self, testdir): testdir.makepyfile(""" import pytest def test_skip(): pytest.skip("hello23") """) result, dom = runandparse(testdir) assert result.ret == 0 node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, skips=1) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, classname="test_skip_contains_name_reason", name="test_skip") snode = tnode.getElementsByTagName("skipped")[0] assert_attr(snode, type="pytest.skip", message="hello23", ) def test_classname_instance(self, testdir): testdir.makepyfile(""" class TestClass: def test_method(self): assert 0 """) result, dom = runandparse(testdir) assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, failures=1) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, classname="test_classname_instance.TestClass", name="test_method") def 
test_classname_nested_dir(self, testdir): p = testdir.tmpdir.ensure("sub", "test_hello.py") p.write("def test_func(): 0/0") result, dom = runandparse(testdir) assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, failures=1) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, classname="sub.test_hello", name="test_func") def test_internal_error(self, testdir): testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0") testdir.makepyfile("def test_function(): pass") result, dom = runandparse(testdir) assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, errors=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, classname="pytest", name="internal") fnode = tnode.getElementsByTagName("error")[0] assert_attr(fnode, message="internal error") assert "Division" in fnode.toxml() def test_failure_function(self, testdir): testdir.makepyfile(""" import sys def test_fail(): print ("hello-stdout") sys.stderr.write("hello-stderr\\n") raise ValueError(42) """) result, dom = runandparse(testdir) assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, failures=1, tests=1) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, classname="test_failure_function", name="test_fail") fnode = tnode.getElementsByTagName("failure")[0] assert_attr(fnode, message="test failure") assert "ValueError" in fnode.toxml() systemout = fnode.nextSibling assert systemout.tagName == "system-out" assert "hello-stdout" in systemout.toxml() systemerr = systemout.nextSibling assert systemerr.tagName == "system-err" assert "hello-stderr" in systemerr.toxml() def test_failure_escape(self, testdir): testdir.makepyfile(""" import pytest @pytest.mark.parametrize('arg1', "<&'", ids="<&'") def test_func(arg1): print(arg1) assert 0 """) result, dom = runandparse(testdir) assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, failures=3, 
tests=3) for index, char in enumerate("<&'"): tnode = node.getElementsByTagName("testcase")[index] assert_attr(tnode, classname="test_failure_escape", name="test_func[%s]" % char) sysout = tnode.getElementsByTagName('system-out')[0] text = sysout.childNodes[0].wholeText assert text == '%s\n' % char def test_junit_prefixing(self, testdir): testdir.makepyfile(""" def test_func(): assert 0 class TestHello: def test_hello(self): pass """) result, dom = runandparse(testdir, "--junitprefix=xyz") assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, failures=1, tests=2) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, classname="xyz.test_junit_prefixing", name="test_func") tnode = node.getElementsByTagName("testcase")[1] assert_attr(tnode, classname="xyz.test_junit_prefixing." "TestHello", name="test_hello") def test_xfailure_function(self, testdir): testdir.makepyfile(""" import pytest def test_xfail(): pytest.xfail("42") """) result, dom = runandparse(testdir) assert not result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, skips=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, classname="test_xfailure_function", name="test_xfail") fnode = tnode.getElementsByTagName("skipped")[0] assert_attr(fnode, message="expected test failure") #assert "ValueError" in fnode.toxml() def test_xfailure_xpass(self, testdir): testdir.makepyfile(""" import pytest @pytest.mark.xfail def test_xpass(): pass """) result, dom = runandparse(testdir) #assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, skips=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, classname="test_xfailure_xpass", name="test_xpass") fnode = tnode.getElementsByTagName("skipped")[0] assert_attr(fnode, message="xfail-marked test passes unexpectedly") #assert "ValueError" in fnode.toxml() def test_collect_error(self, testdir): testdir.makepyfile("syntax error") 
result, dom = runandparse(testdir) assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, errors=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, #classname="test_collect_error", name="test_collect_error") fnode = tnode.getElementsByTagName("failure")[0] assert_attr(fnode, message="collection failure") assert "SyntaxError" in fnode.toxml() def test_collect_skipped(self, testdir): testdir.makepyfile("import pytest; pytest.skip('xyz')") result, dom = runandparse(testdir) assert not result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, skips=1, tests=0) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, #classname="test_collect_error", name="test_collect_skipped") fnode = tnode.getElementsByTagName("skipped")[0] assert_attr(fnode, message="collection skipped") def test_unicode(self, testdir): value = 'hx\xc4\x85\xc4\x87\n' testdir.makepyfile(""" # coding: latin1 def test_hello(): print (%r) assert 0 """ % value) result, dom = runandparse(testdir) assert result.ret == 1 tnode = dom.getElementsByTagName("testcase")[0] fnode = tnode.getElementsByTagName("failure")[0] if not sys.platform.startswith("java"): assert "hx" in fnode.toxml() def test_assertion_binchars(self, testdir): """this test did fail when the escaping wasnt strict""" testdir.makepyfile(""" M1 = '\x01\x02\x03\x04' M2 = '\x01\x02\x03\x05' def test_str_compare(): assert M1 == M2 """) result, dom = runandparse(testdir) print(dom.toxml()) def test_pass_captures_stdout(self, testdir): testdir.makepyfile(""" def test_pass(): print('hello-stdout') """) result, dom = runandparse(testdir) node = dom.getElementsByTagName("testsuite")[0] pnode = node.getElementsByTagName("testcase")[0] systemout = pnode.getElementsByTagName("system-out")[0] assert "hello-stdout" in systemout.toxml() def test_pass_captures_stderr(self, testdir): testdir.makepyfile(""" import sys def test_pass(): sys.stderr.write('hello-stderr') """) 
result, dom = runandparse(testdir) node = dom.getElementsByTagName("testsuite")[0] pnode = node.getElementsByTagName("testcase")[0] systemout = pnode.getElementsByTagName("system-err")[0] assert "hello-stderr" in systemout.toxml() def test_mangle_testnames(): from _pytest.junitxml import mangle_testnames names = ["a/pything.py", "Class", "()", "method"] newnames = mangle_testnames(names) assert newnames == ["a.pything", "Class", "method"] def test_dont_configure_on_slaves(tmpdir): gotten = [] class FakeConfig: def __init__(self): self.pluginmanager = self self.option = self junitprefix = None #XXX: shouldnt need tmpdir ? xmlpath = str(tmpdir.join('junix.xml')) register = gotten.append fake_config = FakeConfig() from _pytest import junitxml junitxml.pytest_configure(fake_config) assert len(gotten) == 1 FakeConfig.slaveinput = None junitxml.pytest_configure(fake_config) assert len(gotten) == 1 class TestNonPython: def test_summing_simple(self, testdir): testdir.makeconftest(""" import pytest def pytest_collect_file(path, parent): if path.ext == ".xyz": return MyItem(path, parent) class MyItem(pytest.Item): def __init__(self, path, parent): super(MyItem, self).__init__(path.basename, parent) self.fspath = path def runtest(self): raise ValueError(42) def repr_failure(self, excinfo): return "custom item runtest failed" """) testdir.tmpdir.join("myfile.xyz").write("hello") result, dom = runandparse(testdir) assert result.ret node = dom.getElementsByTagName("testsuite")[0] assert_attr(node, errors=0, failures=1, skips=0, tests=1) tnode = node.getElementsByTagName("testcase")[0] assert_attr(tnode, #classname="test_collect_error", name="myfile.xyz") fnode = tnode.getElementsByTagName("failure")[0] assert_attr(fnode, message="test failure") assert "custom item runtest failed" in fnode.toxml() def test_nullbyte(testdir): # A null byte can not occur in XML (see section 2.2 of the spec) testdir.makepyfile(""" import sys def test_print_nullbyte(): sys.stdout.write('Here the null 
-->' + chr(0) + '<--') sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--') assert False """) xmlf = testdir.tmpdir.join('junit.xml') testdir.runpytest('--junitxml=%s' % xmlf) text = xmlf.read() assert '\x00' not in text assert '#x00' in text def test_nullbyte_replace(testdir): # Check if the null byte gets replaced testdir.makepyfile(""" import sys def test_print_nullbyte(): sys.stdout.write('Here the null -->' + chr(0) + '<--') sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--') assert False """) xmlf = testdir.tmpdir.join('junit.xml') testdir.runpytest('--junitxml=%s' % xmlf) text = xmlf.read() assert '#x0' in text def test_invalid_xml_escape(): # Test some more invalid xml chars, the full range should be # tested really but let's just thest the edges of the ranges # intead. # XXX This only tests low unicode character points for now as # there are some issues with the testing infrastructure for # the higher ones. # XXX Testing 0xD (\r) is tricky as it overwrites the just written # line in the output, so we skip it too. 
global unichr try: unichr(65) except NameError: unichr = chr invalid = (0x00, 0x1, 0xB, 0xC, 0xE, 0x19, 27, # issue #126 0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) #, 0x110000) valid = (0x9, 0xA, 0x20,) # 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF) from _pytest.junitxml import bin_xml_escape for i in invalid: got = bin_xml_escape(unichr(i)).uniobj if i <= 0xFF: expected = '#x%02X' % i else: expected = '#x%04X' % i assert got == expected for i in valid: assert chr(i) == bin_xml_escape(unichr(i)).uniobj def test_logxml_path_expansion(tmpdir, monkeypatch): home_tilde = py.path.local(os.path.expanduser('~')).join('test.xml') xml_tilde = LogXML('~%stest.xml' % tmpdir.sep, None) assert xml_tilde.logfile == home_tilde # this is here for when $HOME is not set correct monkeypatch.setenv("HOME", tmpdir) home_var = os.path.normpath(os.path.expandvars('$HOME/test.xml')) xml_var = LogXML('$HOME%stest.xml' % tmpdir.sep, None) assert xml_var.logfile == home_var def test_logxml_changingdir(testdir): testdir.makepyfile(""" def test_func(): import os os.chdir("a") """) testdir.tmpdir.mkdir("a") result = testdir.runpytest("--junitxml=a/x.xml") assert result.ret == 0 assert testdir.tmpdir.join("a/x.xml").check() def test_escaped_parametrized_names_xml(testdir): testdir.makepyfile(""" import pytest @pytest.mark.parametrize('char', ["\\x00"]) def test_func(char): assert char """) result, dom = runandparse(testdir) assert result.ret == 0 node = dom.getElementsByTagName("testcase")[0] assert_attr(node, name="test_func[#x00]") def test_unicode_issue368(testdir): path = testdir.tmpdir.join("test.xml") log = LogXML(str(path), None) ustr = py.builtin._totext("Ð’ÐИ!", "utf-8") class report: longrepr = ustr sections = [] nodeid = "something" # hopefully this is not too brittle ... 
log.pytest_sessionstart() log._opentestcase(report) log.append_failure(report) log.append_collect_failure(report) log.append_collect_skipped(report) log.append_error(report) report.longrepr = "filename", 1, ustr log.append_skipped(report) report.wasxfail = ustr log.append_skipped(report) log.pytest_sessionfinish() pytest-2.5.1/testing/test_pastebin.py0000664000175000017500000000360112254002202017317 0ustar hpkhpk00000000000000 class TestPasting: def pytest_funcarg__pastebinlist(self, request): mp = request.getfuncargvalue("monkeypatch") pastebinlist = [] class MockProxy: def newPaste(self, language, code): pastebinlist.append((language, code)) plugin = request.config.pluginmanager.getplugin('pastebin') mp.setattr(plugin, 'getproxy', MockProxy) return pastebinlist def test_failed(self, testdir, pastebinlist): testpath = testdir.makepyfile(""" import pytest def test_pass(): pass def test_fail(): assert 0 def test_skip(): pytest.skip("") """) reprec = testdir.inline_run(testpath, "--paste=failed") assert len(pastebinlist) == 1 assert pastebinlist[0][0] == "python" s = pastebinlist[0][1] assert s.find("def test_fail") != -1 assert reprec.countoutcomes() == [1,1,1] def test_all(self, testdir, pastebinlist): testpath = testdir.makepyfile(""" import pytest def test_pass(): pass def test_fail(): assert 0 def test_skip(): pytest.skip("") """) reprec = testdir.inline_run(testpath, "--pastebin=all") assert reprec.countoutcomes() == [1,1,1] assert len(pastebinlist) == 1 assert pastebinlist[0][0] == "python" s = pastebinlist[0][1] for x in 'test_fail test_skip skipped'.split(): assert s.find(x), (s, x) class TestRPCClient: def pytest_funcarg__pastebin(self, request): return request.config.pluginmanager.getplugin('pastebin') def test_getproxy(self, pastebin): proxy = pastebin.getproxy() assert proxy is not None assert proxy.__class__.__module__.startswith('xmlrpc') pytest-2.5.1/testing/acceptance_test.py0000664000175000017500000005003712254002202017605 0ustar 
hpkhpk00000000000000import py, pytest class TestGeneralUsage: def test_config_error(self, testdir): testdir.makeconftest(""" def pytest_configure(config): import pytest raise pytest.UsageError("hello") """) result = testdir.runpytest(testdir.tmpdir) assert result.ret != 0 result.stderr.fnmatch_lines([ '*ERROR: hello' ]) def test_root_conftest_syntax_error(self, testdir): testdir.makepyfile(conftest="raise SyntaxError\n") result = testdir.runpytest() result.stderr.fnmatch_lines(["*raise SyntaxError*"]) assert result.ret != 0 def test_early_hook_error_issue38_1(self, testdir): testdir.makeconftest(""" def pytest_sessionstart(): 0 / 0 """) result = testdir.runpytest(testdir.tmpdir) assert result.ret != 0 # tracestyle is native by default for hook failures result.stdout.fnmatch_lines([ '*INTERNALERROR*File*conftest.py*line 2*', '*0 / 0*', ]) result = testdir.runpytest(testdir.tmpdir, "--fulltrace") assert result.ret != 0 # tracestyle is native by default for hook failures result.stdout.fnmatch_lines([ '*INTERNALERROR*def pytest_sessionstart():*', '*INTERNALERROR*0 / 0*', ]) def test_early_hook_configure_error_issue38(self, testdir): testdir.makeconftest(""" def pytest_configure(): 0 / 0 """) result = testdir.runpytest(testdir.tmpdir) assert result.ret != 0 # here we get it on stderr result.stderr.fnmatch_lines([ '*INTERNALERROR*File*conftest.py*line 2*', '*0 / 0*', ]) def test_file_not_found(self, testdir): result = testdir.runpytest("asd") assert result.ret != 0 result.stderr.fnmatch_lines(["ERROR: file not found*asd"]) def test_file_not_found_unconfigure_issue143(self, testdir): testdir.makeconftest(""" def pytest_configure(): print("---configure") def pytest_unconfigure(): print("---unconfigure") """) result = testdir.runpytest("-s", "asd") assert result.ret == 4 # EXIT_USAGEERROR result.stderr.fnmatch_lines(["ERROR: file not found*asd"]) result.stdout.fnmatch_lines([ "*---configure", "*---unconfigure", ]) def test_config_preparse_plugin_option(self, testdir): 
testdir.makepyfile(pytest_xyz=""" def pytest_addoption(parser): parser.addoption("--xyz", dest="xyz", action="store") """) testdir.makepyfile(test_one=""" def test_option(pytestconfig): assert pytestconfig.option.xyz == "123" """) result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123") assert result.ret == 0 result.stdout.fnmatch_lines([ '*1 passed*', ]) def test_assertion_magic(self, testdir): p = testdir.makepyfile(""" def test_this(): x = 0 assert x """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "> assert x", "E assert 0", ]) assert result.ret == 1 def test_nested_import_error(self, testdir): p = testdir.makepyfile(""" import import_fails def test_this(): assert import_fails.a == 1 """) testdir.makepyfile(import_fails="import does_not_work") result = testdir.runpytest(p) result.stdout.fnmatch_lines([ #XXX on jython this fails: "> import import_fails", "E ImportError: No module named *does_not_work*", ]) assert result.ret == 1 def test_not_collectable_arguments(self, testdir): p1 = testdir.makepyfile("") p2 = testdir.makefile(".pyc", "123") result = testdir.runpytest(p1, p2) assert result.ret result.stderr.fnmatch_lines([ "*ERROR: not found:*%s" %(p2.basename,) ]) def test_early_skip(self, testdir): testdir.mkdir("xyz") testdir.makeconftest(""" import pytest def pytest_collect_directory(): pytest.skip("early") """) result = testdir.runpytest() assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 skip*" ]) def test_issue88_initial_file_multinodes(self, testdir): testdir.makeconftest(""" import pytest class MyFile(pytest.File): def collect(self): return [MyItem("hello", parent=self)] def pytest_collect_file(path, parent): return MyFile(path, parent) class MyItem(pytest.Item): pass """) p = testdir.makepyfile("def test_hello(): pass") result = testdir.runpytest(p, "--collect-only") result.stdout.fnmatch_lines([ "*MyFile*test_issue88*", "*Module*test_issue88*", ]) def test_issue93_initialnode_importing_capturing(self, testdir): 
testdir.makeconftest(""" import sys print ("should not be seen") sys.stderr.write("stder42\\n") """) result = testdir.runpytest() assert result.ret == 0 assert "should not be seen" not in result.stdout.str() assert "stderr42" not in result.stderr.str() def test_conftest_printing_shows_if_error(self, testdir): testdir.makeconftest(""" print ("should be seen") assert 0 """) result = testdir.runpytest() assert result.ret != 0 assert "should be seen" in result.stdout.str() @pytest.mark.skipif("not hasattr(py.path.local, 'mksymlinkto')") def test_chdir(self, testdir): testdir.tmpdir.join("py").mksymlinkto(py._pydir) p = testdir.tmpdir.join("main.py") p.write(py.code.Source(""" import sys, os sys.path.insert(0, '') import py print (py.__file__) print (py.__path__) os.chdir(os.path.dirname(os.getcwd())) print (py.log) """)) result = testdir.runpython(p, prepend=False) assert not result.ret def test_issue109_sibling_conftests_not_loaded(self, testdir): sub1 = testdir.tmpdir.mkdir("sub1") sub2 = testdir.tmpdir.mkdir("sub2") sub1.join("conftest.py").write("assert 0") result = testdir.runpytest(sub2) assert result.ret == 0 sub2.ensure("__init__.py") p = sub2.ensure("test_hello.py") result = testdir.runpytest(p) assert result.ret == 0 result = testdir.runpytest(sub1) assert result.ret != 0 def test_directory_skipped(self, testdir): testdir.makeconftest(""" import pytest def pytest_ignore_collect(): pytest.skip("intentional") """) testdir.makepyfile("def test_hello(): pass") result = testdir.runpytest() assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 skipped*" ]) def test_multiple_items_per_collector_byid(self, testdir): c = testdir.makeconftest(""" import pytest class MyItem(pytest.Item): def runtest(self): pass class MyCollector(pytest.File): def collect(self): return [MyItem(name="xyz", parent=self)] def pytest_collect_file(path, parent): if path.basename.startswith("conftest"): return MyCollector(path, parent) """) result = testdir.runpytest(c.basename+"::"+"xyz") 
assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 pass*", ]) def test_skip_on_generated_funcarg_id(self, testdir): testdir.makeconftest(""" import pytest def pytest_generate_tests(metafunc): metafunc.addcall({'x': 3}, id='hello-123') def pytest_runtest_setup(item): print (item.keywords) if 'hello-123' in item.keywords: pytest.skip("hello") assert 0 """) p = testdir.makepyfile("""def test_func(x): pass""") res = testdir.runpytest(p) assert res.ret == 0 res.stdout.fnmatch_lines(["*1 skipped*"]) def test_direct_addressing_selects(self, testdir): p = testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.addcall({'i': 1}, id="1") metafunc.addcall({'i': 2}, id="2") def test_func(i): pass """) res = testdir.runpytest(p.basename + "::" + "test_func[1]") assert res.ret == 0 res.stdout.fnmatch_lines(["*1 passed*"]) def test_direct_addressing_notfound(self, testdir): p = testdir.makepyfile(""" def test_func(): pass """) res = testdir.runpytest(p.basename + "::" + "test_notfound") assert res.ret res.stderr.fnmatch_lines(["*ERROR*not found*"]) def test_docstring_on_hookspec(self): from _pytest import hookspec for name, value in vars(hookspec).items(): if name.startswith("pytest_"): assert value.__doc__, "no docstring for %s" % name def test_initialization_error_issue49(self, testdir): testdir.makeconftest(""" def pytest_configure(): x """) result = testdir.runpytest() assert result.ret == 3 # internal error result.stderr.fnmatch_lines([ "INTERNAL*pytest_configure*", "INTERNAL*x*", ]) assert 'sessionstarttime' not in result.stderr.str() @pytest.mark.parametrize('lookfor', ['test_fun.py', 'test_fun.py::test_a']) def test_issue134_report_syntaxerror_when_collecting_member(self, testdir, lookfor): testdir.makepyfile(test_fun=""" def test_a(): pass def""") result = testdir.runpytest(lookfor) result.stdout.fnmatch_lines(['*SyntaxError*']) if '::' in lookfor: result.stderr.fnmatch_lines([ '*ERROR*', ]) assert result.ret == 4 # usage error only if item not found 
def test_namespace_import_doesnt_confuse_import_hook(self, testdir): # Ref #383. Python 3.3's namespace package messed with our import hooks # Importing a module that didn't exist, even if the ImportError was # gracefully handled, would make our test crash. testdir.mkdir('not_a_package') p = testdir.makepyfile(""" try: from not_a_package import doesnt_exist except ImportError: # We handle the import error gracefully here pass def test_whatever(): pass """) res = testdir.runpytest(p.basename) assert res.ret == 0 class TestInvocationVariants: def test_earlyinit(self, testdir): p = testdir.makepyfile(""" import pytest assert hasattr(pytest, 'mark') """) result = testdir.runpython(p) assert result.ret == 0 @pytest.mark.xfail("sys.platform.startswith('java')") def test_pydoc(self, testdir): for name in ('py.test', 'pytest'): result = testdir.runpython_c("import %s;help(%s)" % (name, name)) assert result.ret == 0 s = result.stdout.str() assert 'MarkGenerator' in s def test_import_star_py_dot_test(self, testdir): p = testdir.makepyfile(""" from py.test import * #collect #cmdline #Item #assert collect.Item is Item #assert collect.Collector is Collector main skip xfail """) result = testdir.runpython(p) assert result.ret == 0 def test_import_star_pytest(self, testdir): p = testdir.makepyfile(""" from pytest import * #Item #File main skip xfail """) result = testdir.runpython(p) assert result.ret == 0 def test_double_pytestcmdline(self, testdir): p = testdir.makepyfile(run=""" import pytest pytest.main() pytest.main() """) testdir.makepyfile(""" def test_hello(): pass """) result = testdir.runpython(p) result.stdout.fnmatch_lines([ "*1 passed*", "*1 passed*", ]) @pytest.mark.skipif("sys.version_info < (2,5)") def test_python_minus_m_invocation_ok(self, testdir): p1 = testdir.makepyfile("def test_hello(): pass") res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1)) assert res.ret == 0 @pytest.mark.skipif("sys.version_info < (2,5)") def 
test_python_minus_m_invocation_fail(self, testdir): p1 = testdir.makepyfile("def test_fail(): 0/0") res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1)) assert res.ret == 1 @pytest.mark.skipif("sys.version_info < (2,5)") def test_python_pytest_package(self, testdir): p1 = testdir.makepyfile("def test_pass(): pass") res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1)) assert res.ret == 0 res.stdout.fnmatch_lines(["*1 passed*"]) def test_equivalence_pytest_pytest(self): assert pytest.main == py.test.cmdline.main def test_invoke_with_string(self, capsys): retcode = pytest.main("-h") assert not retcode out, err = capsys.readouterr() assert "--help" in out pytest.raises(ValueError, lambda: pytest.main(0)) def test_invoke_with_path(self, tmpdir, capsys): retcode = pytest.main(tmpdir) assert not retcode out, err = capsys.readouterr() def test_invoke_plugin_api(self, testdir, capsys): class MyPlugin: def pytest_addoption(self, parser): parser.addoption("--myopt") pytest.main(["-h"], plugins=[MyPlugin()]) out, err = capsys.readouterr() assert "--myopt" in out def test_pyargs_importerror(self, testdir, monkeypatch): monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False) path = testdir.mkpydir("tpkg") path.join("test_hello.py").write('raise ImportError') result = testdir.runpytest("--pyargs", "tpkg.test_hello") assert result.ret != 0 # FIXME: It would be more natural to match NOT # "ERROR*file*or*package*not*found*". 
result.stdout.fnmatch_lines([ "*collected 0 items*" ]) def test_cmdline_python_package(self, testdir, monkeypatch): monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False) path = testdir.mkpydir("tpkg") path.join("test_hello.py").write("def test_hello(): pass") path.join("test_world.py").write("def test_world(): pass") result = testdir.runpytest("--pyargs", "tpkg") assert result.ret == 0 result.stdout.fnmatch_lines([ "*2 passed*" ]) result = testdir.runpytest("--pyargs", "tpkg.test_hello") assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 passed*" ]) def join_pythonpath(what): cur = py.std.os.environ.get('PYTHONPATH') if cur: return str(what) + ':' + cur return what empty_package = testdir.mkpydir("empty_package") monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package)) result = testdir.runpytest("--pyargs", ".") assert result.ret == 0 result.stdout.fnmatch_lines([ "*2 passed*" ]) monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir)) path.join('test_hello.py').remove() result = testdir.runpytest("--pyargs", "tpkg.test_hello") assert result.ret != 0 result.stderr.fnmatch_lines([ "*not*found*test_hello*", ]) def test_cmdline_python_package_not_exists(self, testdir): result = testdir.runpytest("--pyargs", "tpkgwhatv") assert result.ret result.stderr.fnmatch_lines([ "ERROR*file*or*package*not*found*", ]) @pytest.mark.xfail(reason="decide: feature or bug") def test_noclass_discovery_if_not_testcase(self, testdir): testpath = testdir.makepyfile(""" import unittest class TestHello(object): def test_hello(self): assert self.attr class RealTest(unittest.TestCase, TestHello): attr = 42 """) reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=1) def test_doctest_id(self, testdir): testdir.makefile('.txt', """ >>> x=3 >>> x 4 """) result = testdir.runpytest("-rf") lines = result.stdout.str().splitlines() for line in lines: if line.startswith("FAIL "): testid = line[5:].strip() break result = testdir.runpytest(testid, '-rf') 
result.stdout.fnmatch_lines([ line, "*1 failed*", ]) class TestDurations: source = """ import time frag = 0.002 def test_something(): pass def test_2(): time.sleep(frag*5) def test_1(): time.sleep(frag) def test_3(): time.sleep(frag*10) """ def test_calls(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=10") assert result.ret == 0 result.stdout.fnmatch_lines_random([ "*durations*", "*call*test_3*", "*call*test_2*", "*call*test_1*", ]) def test_calls_show_2(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=2") assert result.ret == 0 lines = result.stdout.get_lines_after("*slowest*durations*") assert "4 passed" in lines[2] def test_calls_showall(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=0") assert result.ret == 0 for x in "123": for y in 'call',: #'setup', 'call', 'teardown': for line in result.stdout.lines: if ("test_%s" % x) in line and y in line: break else: raise AssertionError("not found %s %s" % (x,y)) def test_with_deselected(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=2", "-k test_1") assert result.ret == 0 result.stdout.fnmatch_lines([ "*durations*", "*call*test_1*", ]) def test_with_failing_collection(self, testdir): testdir.makepyfile(self.source) testdir.makepyfile(test_collecterror="""xyz""") result = testdir.runpytest("--durations=2", "-k test_1") assert result.ret != 0 result.stdout.fnmatch_lines([ "*durations*", "*call*test_1*", ]) class TestDurationWithFixture: source = """ import time frag = 0.001 def setup_function(func): time.sleep(frag * 3) def test_1(): time.sleep(frag*2) def test_2(): time.sleep(frag) """ def test_setup_function(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=10") assert result.ret == 0 result.stdout.fnmatch_lines_random(""" *durations* * setup *test_1* * call *test_1* """) 
pytest-2.5.1/testing/test_genscript.py0000664000175000017500000000237112254002202017513 0ustar hpkhpk00000000000000import pytest import sys @pytest.fixture(scope="module") def standalone(request): return Standalone(request) class Standalone: def __init__(self, request): self.testdir = request.getfuncargvalue("testdir") script = "mypytest" result = self.testdir.runpytest("--genscript=%s" % script) assert result.ret == 0 self.script = self.testdir.tmpdir.join(script) assert self.script.check() def run(self, anypython, testdir, *args): testdir.chdir() return testdir._run(anypython, self.script, *args) def test_gen(testdir, anypython, standalone): if sys.version_info >= (2,7): result = testdir._run(anypython, "-c", "import sys;print (sys.version_info >=(2,7))") assert result.ret == 0 if result.stdout.str() == "False": pytest.skip("genscript called from python2.7 cannot work " "earlier python versions") result = standalone.run(anypython, testdir, '--version') assert result.ret == 0 result.stderr.fnmatch_lines([ "*imported from*mypytest*" ]) p = testdir.makepyfile("def test_func(): assert 0") result = standalone.run(anypython, testdir, p) assert result.ret != 0 pytest-2.5.1/testing/test_session.py0000664000175000017500000002024212254002202017175 0ustar hpkhpk00000000000000import pytest, py class SessionTests: def test_basic_testitem_events(self, testdir): tfile = testdir.makepyfile(""" def test_one(): pass def test_one_one(): assert 0 def test_other(): raise ValueError(23) class TestClass: def test_two(self, someargs): pass """) reprec = testdir.inline_run(tfile) passed, skipped, failed = reprec.listoutcomes() assert len(skipped) == 0 assert len(passed) == 1 assert len(failed) == 3 end = lambda x: x.nodeid.split("::")[-1] assert end(failed[0]) == "test_one_one" assert end(failed[1]) == "test_other" itemstarted = reprec.getcalls("pytest_itemcollected") assert len(itemstarted) == 4 # XXX check for failing funcarg setup #colreports = reprec.getcalls("pytest_collectreport") 
#assert len(colreports) == 4 #assert colreports[1].report.failed def test_nested_import_error(self, testdir): tfile = testdir.makepyfile(""" import import_fails def test_this(): assert import_fails.a == 1 """, import_fails=""" import does_not_work a = 1 """) reprec = testdir.inline_run(tfile) l = reprec.getfailedcollections() assert len(l) == 1 out = l[0].longrepr.reprcrash.message assert out.find('does_not_work') != -1 def test_raises_output(self, testdir): reprec = testdir.inline_runsource(""" import pytest def test_raises_doesnt(): pytest.raises(ValueError, int, "3") """) passed, skipped, failed = reprec.listoutcomes() assert len(failed) == 1 out = failed[0].longrepr.reprcrash.message if not out.find("DID NOT RAISE") != -1: print(out) py.test.fail("incorrect raises() output") def test_generator_yields_None(self, testdir): reprec = testdir.inline_runsource(""" def test_1(): yield None """) failures = reprec.getfailedcollections() out = failures[0].longrepr.reprcrash.message i = out.find('TypeError') assert i != -1 def test_syntax_error_module(self, testdir): reprec = testdir.inline_runsource("this is really not python") l = reprec.getfailedcollections() assert len(l) == 1 out = str(l[0].longrepr) assert out.find(str('not python')) != -1 def test_exit_first_problem(self, testdir): reprec = testdir.inline_runsource(""" def test_one(): assert 0 def test_two(): assert 0 """, '--exitfirst') passed, skipped, failed = reprec.countoutcomes() assert failed == 1 assert passed == skipped == 0 def test_maxfail(self, testdir): reprec = testdir.inline_runsource(""" def test_one(): assert 0 def test_two(): assert 0 def test_three(): assert 0 """, '--maxfail=2') passed, skipped, failed = reprec.countoutcomes() assert failed == 2 assert passed == skipped == 0 def test_broken_repr(self, testdir): p = testdir.makepyfile(""" import pytest class BrokenRepr1: foo=0 def __repr__(self): raise Exception("Ha Ha fooled you, I'm a broken repr().") class TestBrokenClass: def 
test_explicit_bad_repr(self): t = BrokenRepr1() pytest.raises(Exception, 'repr(t)') def test_implicit_bad_repr1(self): t = BrokenRepr1() assert t.foo == 1 """) reprec = testdir.inline_run(p) passed, skipped, failed = reprec.listoutcomes() assert len(failed) == 1 out = failed[0].longrepr.reprcrash.message assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 #' def test_skip_file_by_conftest(self, testdir): testdir.makepyfile(conftest=""" import pytest def pytest_collect_file(): pytest.skip("intentional") """, test_file=""" def test_one(): pass """) try: reprec = testdir.inline_run(testdir.tmpdir) except pytest.skip.Exception: py.test.fail("wrong skipped caught") reports = reprec.getreports("pytest_collectreport") assert len(reports) == 1 assert reports[0].skipped class TestNewSession(SessionTests): def test_order_of_execution(self, testdir): reprec = testdir.inline_runsource(""" l = [] def test_1(): l.append(1) def test_2(): l.append(2) def test_3(): assert l == [1,2] class Testmygroup: reslist = l def test_1(self): self.reslist.append(1) def test_2(self): self.reslist.append(2) def test_3(self): self.reslist.append(3) def test_4(self): assert self.reslist == [1,2,1,2,3] """) passed, skipped, failed = reprec.countoutcomes() assert failed == skipped == 0 assert passed == 7 # also test listnames() here ... 
def test_collect_only_with_various_situations(self, testdir): p = testdir.makepyfile( test_one=""" def test_one(): raise ValueError() class TestX: def test_method_one(self): pass class TestY(TestX): pass """, test_two=""" import pytest pytest.skip('xxx') """, test_three="xxxdsadsadsadsa", __init__="" ) reprec = testdir.inline_run('--collect-only', p.dirpath()) itemstarted = reprec.getcalls("pytest_itemcollected") assert len(itemstarted) == 3 assert not reprec.getreports("pytest_runtest_logreport") started = reprec.getcalls("pytest_collectstart") finished = reprec.getreports("pytest_collectreport") assert len(started) == len(finished) assert len(started) == 8 # XXX extra TopCollector colfail = [x for x in finished if x.failed] colskipped = [x for x in finished if x.skipped] assert len(colfail) == 1 assert len(colskipped) == 1 def test_minus_x_import_error(self, testdir): testdir.makepyfile(__init__="") testdir.makepyfile(test_one="xxxx", test_two="yyyy") reprec = testdir.inline_run("-x", testdir.tmpdir) finished = reprec.getreports("pytest_collectreport") colfail = [x for x in finished if x.failed] assert len(colfail) == 1 def test_plugin_specify(testdir): testdir.chdir() pytest.raises(ImportError, """ testdir.parseconfig("-p", "nqweotexistent") """) #pytest.raises(ImportError, # "config.do_configure(config)" #) def test_plugin_already_exists(testdir): config = testdir.parseconfig("-p", "terminal") assert config.option.plugins == ['terminal'] config.do_configure() config.do_unconfigure() def test_exclude(testdir): hellodir = testdir.mkdir("hello") hellodir.join("test_hello.py").write("x y syntaxerror") hello2dir = testdir.mkdir("hello2") hello2dir.join("test_hello2.py").write("x y syntaxerror") testdir.makepyfile(test_ok="def test_pass(): pass") result = testdir.runpytest("--ignore=hello", "--ignore=hello2") assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) def test_sessionfinish_with_start(testdir): testdir.makeconftest(""" import os l = [] def 
pytest_sessionstart(): l.append(os.getcwd()) os.chdir("..") def pytest_sessionfinish(): assert l[0] == os.getcwd() """) res = testdir.runpytest("--collect-only") assert res.ret == 0 pytest-2.5.1/testing/python/0000775000175000017500000000000012254002202015422 5ustar hpkhpk00000000000000pytest-2.5.1/testing/python/integration.py0000664000175000017500000001462412254002202020326 0ustar hpkhpk00000000000000import pytest from _pytest import runner from _pytest import python class TestOEJSKITSpecials: def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage testdir.makeconftest(""" import pytest def pytest_pycollect_makeitem(collector, name, obj): if name == "MyClass": return MyCollector(name, parent=collector) class MyCollector(pytest.Collector): def reportinfo(self): return self.fspath, 3, "xyz" """) modcol = testdir.getmodulecol(""" def pytest_funcarg__arg1(request): return 42 class MyClass: pass """) # this hook finds funcarg factories rep = runner.collect_one_node(collector=modcol) clscol = rep.result[0] clscol.obj = lambda arg1: None clscol.funcargs = {} pytest._fillfuncargs(clscol) assert clscol.funcargs['arg1'] == 42 def test_autouse_fixture(self, testdir): # rough jstests usage testdir.makeconftest(""" import pytest def pytest_pycollect_makeitem(collector, name, obj): if name == "MyClass": return MyCollector(name, parent=collector) class MyCollector(pytest.Collector): def reportinfo(self): return self.fspath, 3, "xyz" """) modcol = testdir.getmodulecol(""" import pytest @pytest.fixture(autouse=True) def hello(): pass def pytest_funcarg__arg1(request): return 42 class MyClass: pass """) # this hook finds funcarg factories rep = runner.collect_one_node(modcol) clscol = rep.result[0] clscol.obj = lambda: None clscol.funcargs = {} pytest._fillfuncargs(clscol) assert not clscol.funcargs def test_wrapped_getfslineno(): def func(): pass def wrap(f): func.__wrapped__ = f func.patchings = ["qwe"] return func @wrap def wrapped_func(x, y, z): pass fs, lineno 
= python.getfslineno(wrapped_func) fs2, lineno2 = python.getfslineno(wrap) assert lineno > lineno2, "getfslineno does not unwrap correctly" class TestMockDecoration: def test_wrapped_getfuncargnames(self): from _pytest.python import getfuncargnames def wrap(f): def func(): pass func.__wrapped__ = f return func @wrap def f(x): pass l = getfuncargnames(f) assert l == ("x",) def test_wrapped_getfuncargnames_patching(self): from _pytest.python import getfuncargnames def wrap(f): def func(): pass func.__wrapped__ = f func.patchings = ["qwe"] return func @wrap def f(x, y, z): pass l = getfuncargnames(f) assert l == ("y", "z") def test_unittest_mock(self, testdir): pytest.importorskip("unittest.mock") testdir.makepyfile(""" import unittest.mock class T(unittest.TestCase): @unittest.mock.patch("os.path.abspath") def test_hello(self, abspath): import os os.path.abspath("hello") abspath.assert_any_call("hello") """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_mock(self, testdir): pytest.importorskip("mock", "1.0.1") testdir.makepyfile(""" import os import unittest import mock class T(unittest.TestCase): @mock.patch("os.path.abspath") def test_hello(self, abspath): os.path.abspath("hello") abspath.assert_any_call("hello") @mock.patch("os.path.abspath") @mock.patch("os.path.normpath") def test_someting(normpath, abspath, tmpdir): abspath.return_value = "this" os.path.normpath(os.path.abspath("hello")) normpath.assert_any_call("this") """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) calls = reprec.getcalls("pytest_runtest_logreport") funcnames = [call.report.location[2] for call in calls if call.report.when == "call"] assert funcnames == ["T.test_hello", "test_someting"] def test_mock_sorting(self, testdir): pytest.importorskip("mock", "1.0.1") testdir.makepyfile(""" import os import mock @mock.patch("os.path.abspath") def test_one(abspath): pass @mock.patch("os.path.abspath") def test_two(abspath): pass @mock.patch("os.path.abspath") 
def test_three(abspath): pass """) reprec = testdir.inline_run() calls = reprec.getreports("pytest_runtest_logreport") calls = [x for x in calls if x.when == "call"] names = [x.nodeid.split("::")[-1] for x in calls] assert names == ["test_one", "test_two", "test_three"] class TestReRunTests: def test_rerun(self, testdir): testdir.makeconftest(""" from _pytest.runner import runtestprotocol def pytest_runtest_protocol(item, nextitem): runtestprotocol(item, log=False, nextitem=nextitem) runtestprotocol(item, log=True, nextitem=nextitem) """) testdir.makepyfile(""" import pytest count = 0 req = None @pytest.fixture def fix(request): global count, req assert request != req req = request print ("fix count %s" % count) count += 1 def test_fix(fix): pass """) result = testdir.runpytest("-s") result.stdout.fnmatch_lines(""" *fix count 0* *fix count 1* """) result.stdout.fnmatch_lines(""" *2 passed* """) def test_pytestconfig_is_session_scoped(): from _pytest.python import pytestconfig assert pytestconfig._pytestfixturefunction.scope == "session" pytest-2.5.1/testing/python/raises.py0000664000175000017500000000402112254002202017257 0ustar hpkhpk00000000000000import pytest class TestRaises: def test_raises(self): source = "int('qwe')" excinfo = pytest.raises(ValueError, source) code = excinfo.traceback[-1].frame.code s = str(code.fullsource) assert s == source def test_raises_exec(self): pytest.raises(ValueError, "a,x = []") def test_raises_syntax_error(self): pytest.raises(SyntaxError, "qwe qwe qwe") def test_raises_function(self): pytest.raises(ValueError, int, 'hello') def test_raises_callable_no_exception(self): class A: def __call__(self): pass try: pytest.raises(ValueError, A()) except pytest.raises.Exception: pass def test_raises_flip_builtin_AssertionError(self): # we replace AssertionError on python level # however c code might still raise the builtin one from _pytest.assertion.util import BuiltinAssertionError # noqa pytest.raises(AssertionError,""" raise 
BuiltinAssertionError """) @pytest.mark.skipif('sys.version < "2.5"') def test_raises_as_contextmanager(self, testdir): testdir.makepyfile(""" from __future__ import with_statement import py, pytest def test_simple(): with pytest.raises(ZeroDivisionError) as excinfo: assert isinstance(excinfo, py.code.ExceptionInfo) 1/0 print (excinfo) assert excinfo.type == ZeroDivisionError def test_noraise(): with pytest.raises(pytest.raises.Exception): with pytest.raises(ValueError): int() def test_raise_wrong_exception_passes_by(): with pytest.raises(ZeroDivisionError): with pytest.raises(ValueError): 1/0 """) result = testdir.runpytest() result.stdout.fnmatch_lines([ '*3 passed*', ]) pytest-2.5.1/testing/python/collect.py0000664000175000017500000006665112254002202017437 0ustar hpkhpk00000000000000import pytest, py class TestModule: def test_failing_import(self, testdir): modcol = testdir.getmodulecol("import alksdjalskdjalkjals") pytest.raises(ImportError, modcol.collect) pytest.raises(ImportError, modcol.collect) def test_import_duplicate(self, testdir): a = testdir.mkdir("a") b = testdir.mkdir("b") p = a.ensure("test_whatever.py") p.pyimport() del py.std.sys.modules['test_whatever'] b.ensure("test_whatever.py") result = testdir.runpytest() result.stdout.fnmatch_lines([ "*import*mismatch*", "*imported*test_whatever*", "*%s*" % a.join("test_whatever.py"), "*not the same*", "*%s*" % b.join("test_whatever.py"), "*HINT*", ]) def test_syntax_error_in_module(self, testdir): modcol = testdir.getmodulecol("this is a syntax error") pytest.raises(modcol.CollectError, modcol.collect) pytest.raises(modcol.CollectError, modcol.collect) def test_module_considers_pluginmanager_at_import(self, testdir): modcol = testdir.getmodulecol("pytest_plugins='xasdlkj',") pytest.raises(ImportError, lambda: modcol.obj) class TestClass: def test_class_with_init_skip_collect(self, testdir): modcol = testdir.getmodulecol(""" class TestClass1: def __init__(self): pass class TestClass2(object): def 
__init__(self): pass """) l = modcol.collect() assert len(l) == 2 for classcol in l: pytest.raises(pytest.skip.Exception, classcol.collect) def test_class_subclassobject(self, testdir): testdir.getmodulecol(""" class test(object): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*collected 0*", ]) def test_setup_teardown_class_as_classmethod(self, testdir): testdir.makepyfile(test_mod1=""" class TestClassMethod: @classmethod def setup_class(cls): pass def test_1(self): pass @classmethod def teardown_class(cls): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*1 passed*", ]) class TestGenerator: def test_generative_functions(self, testdir): modcol = testdir.getmodulecol(""" def func1(arg, arg2): assert arg == arg2 def test_gen(): yield func1, 17, 3*5 yield func1, 42, 6*7 """) colitems = modcol.collect() assert len(colitems) == 1 gencol = colitems[0] assert isinstance(gencol, pytest.Generator) gencolitems = gencol.collect() assert len(gencolitems) == 2 assert isinstance(gencolitems[0], pytest.Function) assert isinstance(gencolitems[1], pytest.Function) assert gencolitems[0].name == '[0]' assert gencolitems[0].obj.__name__ == 'func1' def test_generative_methods(self, testdir): modcol = testdir.getmodulecol(""" def func1(arg, arg2): assert arg == arg2 class TestGenMethods: def test_gen(self): yield func1, 17, 3*5 yield func1, 42, 6*7 """) gencol = modcol.collect()[0].collect()[0].collect()[0] assert isinstance(gencol, pytest.Generator) gencolitems = gencol.collect() assert len(gencolitems) == 2 assert isinstance(gencolitems[0], pytest.Function) assert isinstance(gencolitems[1], pytest.Function) assert gencolitems[0].name == '[0]' assert gencolitems[0].obj.__name__ == 'func1' def test_generative_functions_with_explicit_names(self, testdir): modcol = testdir.getmodulecol(""" def func1(arg, arg2): assert arg == arg2 def test_gen(): yield "seventeen", func1, 17, 3*5 yield "fortytwo", func1, 42, 6*7 """) colitems = modcol.collect() 
assert len(colitems) == 1 gencol = colitems[0] assert isinstance(gencol, pytest.Generator) gencolitems = gencol.collect() assert len(gencolitems) == 2 assert isinstance(gencolitems[0], pytest.Function) assert isinstance(gencolitems[1], pytest.Function) assert gencolitems[0].name == "['seventeen']" assert gencolitems[0].obj.__name__ == 'func1' assert gencolitems[1].name == "['fortytwo']" assert gencolitems[1].obj.__name__ == 'func1' def test_generative_functions_unique_explicit_names(self, testdir): # generative modcol = testdir.getmodulecol(""" def func(): pass def test_gen(): yield "name", func yield "name", func """) colitems = modcol.collect() assert len(colitems) == 1 gencol = colitems[0] assert isinstance(gencol, pytest.Generator) pytest.raises(ValueError, "gencol.collect()") def test_generative_methods_with_explicit_names(self, testdir): modcol = testdir.getmodulecol(""" def func1(arg, arg2): assert arg == arg2 class TestGenMethods: def test_gen(self): yield "m1", func1, 17, 3*5 yield "m2", func1, 42, 6*7 """) gencol = modcol.collect()[0].collect()[0].collect()[0] assert isinstance(gencol, pytest.Generator) gencolitems = gencol.collect() assert len(gencolitems) == 2 assert isinstance(gencolitems[0], pytest.Function) assert isinstance(gencolitems[1], pytest.Function) assert gencolitems[0].name == "['m1']" assert gencolitems[0].obj.__name__ == 'func1' assert gencolitems[1].name == "['m2']" assert gencolitems[1].obj.__name__ == 'func1' def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir): o = testdir.makepyfile(""" def test_generative_order_of_execution(): import py, pytest test_list = [] expected_list = list(range(6)) def list_append(item): test_list.append(item) def assert_order_of_execution(): py.builtin.print_('expected order', expected_list) py.builtin.print_('but got ', test_list) assert test_list == expected_list for i in expected_list: yield list_append, i yield assert_order_of_execution """) reprec = testdir.inline_run(o) passed, 
skipped, failed = reprec.countoutcomes() assert passed == 7 assert not skipped and not failed def test_order_of_execution_generator_different_codeline(self, testdir): o = testdir.makepyfile(""" def test_generative_tests_different_codeline(): import py, pytest test_list = [] expected_list = list(range(3)) def list_append_2(): test_list.append(2) def list_append_1(): test_list.append(1) def list_append_0(): test_list.append(0) def assert_order_of_execution(): py.builtin.print_('expected order', expected_list) py.builtin.print_('but got ', test_list) assert test_list == expected_list yield list_append_0 yield list_append_1 yield list_append_2 yield assert_order_of_execution """) reprec = testdir.inline_run(o) passed, skipped, failed = reprec.countoutcomes() assert passed == 4 assert not skipped and not failed def test_setupstate_is_preserved_134(self, testdir): # yield-based tests are messy wrt to setupstate because # during collection they already invoke setup functions # and then again when they are run. For now, we want to make sure # that the old 1.3.4 behaviour is preserved such that all # yielded functions all share the same "self" instance that # has been used during collection. 
o = testdir.makepyfile(""" setuplist = [] class TestClass: def setup_method(self, func): #print "setup_method", self, func setuplist.append(self) self.init = 42 def teardown_method(self, func): self.init = None def test_func1(self): pass def test_func2(self): yield self.func2 yield self.func2 def func2(self): assert self.init def test_setuplist(): # once for test_func2 during collection # once for test_func1 during test run # once for test_func2 during test run #print setuplist assert len(setuplist) == 3, len(setuplist) assert setuplist[0] == setuplist[2], setuplist assert setuplist[1] != setuplist[2], setuplist """) reprec = testdir.inline_run(o, '-v') passed, skipped, failed = reprec.countoutcomes() assert passed == 4 assert not skipped and not failed class TestFunction: def test_getmodulecollector(self, testdir): item = testdir.getitem("def test_func(): pass") modcol = item.getparent(pytest.Module) assert isinstance(modcol, pytest.Module) assert hasattr(modcol.obj, 'test_func') def test_function_equality(self, testdir, tmpdir): from _pytest.python import FixtureManager config = testdir.parseconfigure() session = testdir.Session(config) session._fixturemanager = FixtureManager(session) def func1(): pass def func2(): pass f1 = pytest.Function(name="name", parent=session, config=config, args=(1,), callobj=func1) assert f1 == f1 f2 = pytest.Function(name="name",config=config, callobj=func2, parent=session) assert f1 != f2 def test_issue197_parametrize_emptyset(self, testdir): testdir.makepyfile(""" import pytest @pytest.mark.parametrize('arg', []) def test_function(arg): pass """) reprec = testdir.inline_run() reprec.assertoutcome(skipped=1) def test_single_tuple_unwraps_values(self, testdir): testdir.makepyfile(""" import pytest @pytest.mark.parametrize(('arg',), [(1,)]) def test_function(arg): assert arg == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_issue213_parametrize_value_no_equal(self, testdir): testdir.makepyfile(""" import 
pytest class A: def __eq__(self, other): raise ValueError("not possible") @pytest.mark.parametrize('arg', [A()]) def test_function(arg): assert arg.__class__.__name__ == "A" """) reprec = testdir.inline_run("--fulltrace") reprec.assertoutcome(passed=1) def test_parametrize_with_non_hashable_values(self, testdir): """Test parametrization with non-hashable values.""" testdir.makepyfile(""" archival_mapping = { '1.0': {'tag': '1.0'}, '1.2.2a1': {'tag': 'release-1.2.2a1'}, } import pytest @pytest.mark.parametrize('key value'.split(), archival_mapping.items()) def test_archival_to_version(key, value): assert key in archival_mapping assert value == archival_mapping[key] """) rec = testdir.inline_run() rec.assertoutcome(passed=2) def test_parametrize_with_non_hashable_values_indirect(self, testdir): """Test parametrization with non-hashable values with indirect parametrization.""" testdir.makepyfile(""" archival_mapping = { '1.0': {'tag': '1.0'}, '1.2.2a1': {'tag': 'release-1.2.2a1'}, } import pytest @pytest.fixture def key(request): return request.param @pytest.fixture def value(request): return request.param @pytest.mark.parametrize('key value'.split(), archival_mapping.items(), indirect=True) def test_archival_to_version(key, value): assert key in archival_mapping assert value == archival_mapping[key] """) rec = testdir.inline_run() rec.assertoutcome(passed=2) def test_parametrize_overrides_fixture(self, testdir): """Test parametrization when parameter overrides existing fixture with same name.""" testdir.makepyfile(""" import pytest @pytest.fixture def value(): return 'value' @pytest.mark.parametrize('value', ['overrided']) def test_overrided_via_param(value): assert value == 'overrided' """) rec = testdir.inline_run() rec.assertoutcome(passed=1) def test_parametrize_with_mark(selfself, testdir): items = testdir.getitems(""" import pytest @pytest.mark.foo @pytest.mark.parametrize('arg', [ 1, pytest.mark.bar(pytest.mark.baz(2)) ]) def test_function(arg): pass """) 
keywords = [item.keywords for item in items] assert 'foo' in keywords[0] and 'bar' not in keywords[0] and 'baz' not in keywords[0] assert 'foo' in keywords[1] and 'bar' in keywords[1] and 'baz' in keywords[1] def test_function_equality_with_callspec(self, testdir, tmpdir): items = testdir.getitems(""" import pytest @pytest.mark.parametrize('arg', [1,2]) def test_function(arg): pass """) assert items[0] != items[1] assert not (items[0] == items[1]) def test_pyfunc_call(self, testdir): item = testdir.getitem("def test_func(): raise ValueError") config = item.config class MyPlugin1: def pytest_pyfunc_call(self, pyfuncitem): raise ValueError class MyPlugin2: def pytest_pyfunc_call(self, pyfuncitem): return True config.pluginmanager.register(MyPlugin1()) config.pluginmanager.register(MyPlugin2()) config.hook.pytest_pyfunc_call(pyfuncitem=item) class TestSorting: def test_check_equality(self, testdir): modcol = testdir.getmodulecol(""" def test_pass(): pass def test_fail(): assert 0 """) fn1 = testdir.collect_by_name(modcol, "test_pass") assert isinstance(fn1, pytest.Function) fn2 = testdir.collect_by_name(modcol, "test_pass") assert isinstance(fn2, pytest.Function) assert fn1 == fn2 assert fn1 != modcol if py.std.sys.version_info < (3, 0): assert cmp(fn1, fn2) == 0 assert hash(fn1) == hash(fn2) fn3 = testdir.collect_by_name(modcol, "test_fail") assert isinstance(fn3, pytest.Function) assert not (fn1 == fn3) assert fn1 != fn3 for fn in fn1,fn2,fn3: assert fn != 3 assert fn != modcol assert fn != [1,2,3] assert [1,2,3] != fn assert modcol != fn def test_allow_sane_sorting_for_decorators(self, testdir): modcol = testdir.getmodulecol(""" def dec(f): g = lambda: f(2) g.place_as = f return g def test_b(y): pass test_b = dec(test_b) def test_a(y): pass test_a = dec(test_a) """) colitems = modcol.collect() assert len(colitems) == 2 assert [item.name for item in colitems] == ['test_b', 'test_a'] class TestConftestCustomization: def test_pytest_pycollect_module(self, testdir): 
testdir.makeconftest(""" import pytest class MyModule(pytest.Module): pass def pytest_pycollect_makemodule(path, parent): if path.basename == "test_xyz.py": return MyModule(path, parent) """) testdir.makepyfile("def test_some(): pass") testdir.makepyfile(test_xyz="def test_func(): pass") result = testdir.runpytest("--collect-only") result.stdout.fnmatch_lines([ "* 3 def test_traceback_error_during_import(self, testdir): testdir.makepyfile(""" x = 1 x = 2 x = 17 asd """) result = testdir.runpytest() assert result.ret != 0 out = result.stdout.str() assert "x = 1" not in out assert "x = 2" not in out result.stdout.fnmatch_lines([ ">*asd*", "E*NameError*", ]) result = testdir.runpytest("--fulltrace") out = result.stdout.str() assert "x = 1" in out assert "x = 2" in out result.stdout.fnmatch_lines([ ">*asd*", "E*NameError*", ]) class TestReportInfo: def test_itemreport_reportinfo(self, testdir, linecomp): testdir.makeconftest(""" import pytest class MyFunction(pytest.Function): def reportinfo(self): return "ABCDE", 42, "custom" def pytest_pycollect_makeitem(collector, name, obj): if name == "test_func": return MyFunction(name, parent=collector) """) item = testdir.getitem("def test_func(): pass") item.config.pluginmanager.getplugin("runner") assert item.location == ("ABCDE", 42, "custom") def test_func_reportinfo(self, testdir): item = testdir.getitem("def test_func(): pass") fspath, lineno, modpath = item.reportinfo() assert fspath == item.fspath assert lineno == 0 assert modpath == "test_func" def test_class_reportinfo(self, testdir): modcol = testdir.getmodulecol(""" # lineno 0 class TestClass: def test_hello(self): pass """) classcol = testdir.collect_by_name(modcol, "TestClass") fspath, lineno, msg = classcol.reportinfo() assert fspath == modcol.fspath assert lineno == 1 assert msg == "TestClass" def test_generator_reportinfo(self, testdir): modcol = testdir.getmodulecol(""" # lineno 0 def test_gen(): def check(x): assert x yield check, 3 """) gencol = 
testdir.collect_by_name(modcol, "test_gen") fspath, lineno, modpath = gencol.reportinfo() assert fspath == modcol.fspath assert lineno == 1 assert modpath == "test_gen" genitem = gencol.collect()[0] fspath, lineno, modpath = genitem.reportinfo() assert fspath == modcol.fspath assert lineno == 2 assert modpath == "test_gen[0]" """ def test_func(): pass def test_genfunc(): def check(x): pass yield check, 3 class TestClass: def test_method(self): pass """ def test_customized_python_discovery(testdir): testdir.makeini(""" [pytest] python_files=check_*.py python_classes=Check python_functions=check """) p = testdir.makepyfile(""" def check_simple(): pass class CheckMyApp: def check_meth(self): pass """) p2 = p.new(basename=p.basename.replace("test", "check")) p.move(p2) result = testdir.runpytest("--collect-only", "-s") result.stdout.fnmatch_lines([ "*check_customized*", "*check_simple*", "*CheckMyApp*", "*check_meth*", ]) result = testdir.runpytest() assert result.ret == 0 result.stdout.fnmatch_lines([ "*2 passed*", ]) def test_customized_python_discovery_functions(testdir): testdir.makeini(""" [pytest] python_functions=_test """) testdir.makepyfile(""" def _test_underscore(): pass """) result = testdir.runpytest("--collect-only", "-s") result.stdout.fnmatch_lines([ "*_test_underscore*", ]) result = testdir.runpytest() assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 passed*", ]) def test_collector_attributes(testdir): testdir.makeconftest(""" import pytest def pytest_pycollect_makeitem(collector): assert collector.Function == pytest.Function assert collector.Class == pytest.Class assert collector.Instance == pytest.Instance assert collector.Module == pytest.Module """) testdir.makepyfile(""" def test_hello(): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*1 passed*", ]) def test_customize_through_attributes(testdir): testdir.makeconftest(""" import pytest class MyFunction(pytest.Function): pass class MyInstance(pytest.Instance): 
Function = MyFunction class MyClass(pytest.Class): Instance = MyInstance def pytest_pycollect_makeitem(collector, name, obj): if name.startswith("MyTestClass"): return MyClass(name, parent=collector) """) testdir.makepyfile(""" class MyTestClass: def test_hello(self): pass """) result = testdir.runpytest("--collect-only") result.stdout.fnmatch_lines([ "*MyClass*", "*MyInstance*", "*MyFunction*test_hello*", ]) def test_unorderable_types(testdir): testdir.makepyfile(""" class TestJoinEmpty: pass def make_test(): class Test: pass Test.__name__ = "TestFoo" return Test TestFoo = make_test() """) result = testdir.runpytest() assert "TypeError" not in result.stdout.str() assert result.ret == 0 pytest-2.5.1/testing/python/fixture.py0000664000175000017500000022262512254002202017473 0ustar hpkhpk00000000000000import pytest, py, sys from _pytest import python as funcargs from _pytest.python import FixtureLookupError from _pytest.pytester import get_public_names from textwrap import dedent def test_getfuncargnames(): def f(): pass assert not funcargs.getfuncargnames(f) def g(arg): pass assert funcargs.getfuncargnames(g) == ('arg',) def h(arg1, arg2="hello"): pass assert funcargs.getfuncargnames(h) == ('arg1',) def h(arg1, arg2, arg3="hello"): pass assert funcargs.getfuncargnames(h) == ('arg1', 'arg2') class A: def f(self, arg1, arg2="hello"): pass assert funcargs.getfuncargnames(A().f) == ('arg1',) if sys.version_info < (3,0): assert funcargs.getfuncargnames(A.f) == ('arg1',) class TestFillFixtures: def test_fillfuncargs_exposed(self): # used by oejskit, kept for compatibility assert pytest._fillfuncargs == funcargs.fillfixtures def test_funcarg_lookupfails(self, testdir): testdir.makepyfile(""" def pytest_funcarg__xyzsomething(request): return 42 def test_func(some): pass """) result = testdir.runpytest() # "--collect-only") assert result.ret != 0 result.stdout.fnmatch_lines([ "*def test_func(some)*", "*fixture*some*not found*", "*xyzsomething*", ]) def 
test_funcarg_basic(self, testdir): item = testdir.getitem(""" def pytest_funcarg__some(request): return request.function.__name__ def pytest_funcarg__other(request): return 42 def test_func(some, other): pass """) funcargs.fillfixtures(item) del item.funcargs["request"] assert len(get_public_names(item.funcargs)) == 2 assert item.funcargs['some'] == "test_func" assert item.funcargs['other'] == 42 def test_funcarg_lookup_modulelevel(self, testdir): testdir.makepyfile(""" def pytest_funcarg__something(request): return request.function.__name__ class TestClass: def test_method(self, something): assert something == "test_method" def test_func(something): assert something == "test_func" """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_funcarg_lookup_classlevel(self, testdir): p = testdir.makepyfile(""" class TestClass: def pytest_funcarg__something(self, request): return request.instance def test_method(self, something): assert something is self """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*1 passed*" ]) def test_conftest_funcargs_only_available_in_subdir(self, testdir): sub1 = testdir.mkpydir("sub1") sub2 = testdir.mkpydir("sub2") sub1.join("conftest.py").write(py.code.Source(""" import pytest def pytest_funcarg__arg1(request): pytest.raises(Exception, "request.getfuncargvalue('arg2')") """)) sub2.join("conftest.py").write(py.code.Source(""" import pytest def pytest_funcarg__arg2(request): pytest.raises(Exception, "request.getfuncargvalue('arg1')") """)) sub1.join("test_in_sub1.py").write("def test_1(arg1): pass") sub2.join("test_in_sub2.py").write("def test_2(arg2): pass") result = testdir.runpytest("-v") result.stdout.fnmatch_lines([ "*2 passed*" ]) def test_extend_fixture_module_class(self, testdir): testfile = testdir.makepyfile(""" import pytest @pytest.fixture def spam(): return 'spam' class TestSpam: @pytest.fixture def spam(self, spam): return spam * 2 def test_spam(self, spam): assert spam == 'spamspam' """) 
result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*1 passed*"]) def test_extend_fixture_conftest_module(self, testdir): testdir.makeconftest(""" import pytest @pytest.fixture def spam(): return 'spam' """) testfile = testdir.makepyfile(""" import pytest @pytest.fixture def spam(spam): return spam * 2 def test_spam(spam): assert spam == 'spamspam' """) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*1 passed*"]) def test_extend_fixture_conftest_conftest(self, testdir): testdir.makeconftest(""" import pytest @pytest.fixture def spam(): return 'spam' """) pkg = testdir.mkpydir("pkg") pkg.join("conftest.py").write(py.code.Source(""" import pytest @pytest.fixture def spam(spam): return spam * 2 """)) testfile = pkg.join("test_spam.py") testfile.write(py.code.Source(""" def test_spam(spam): assert spam == "spamspam" """)) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*1 passed*"]) def test_extend_fixture_conftest_plugin(self, testdir): testdir.makepyfile(testplugin=""" import pytest @pytest.fixture def foo(): return 7 """) testdir.syspathinsert() testdir.makeconftest(""" import pytest pytest_plugins = 'testplugin' @pytest.fixture def foo(foo): return foo + 7 """) testdir.makepyfile(""" def test_foo(foo): assert foo == 14 """) result = testdir.runpytest('-s') assert result.ret == 0 def test_extend_fixture_plugin_plugin(self, testdir): # Two plugins should extend each order in loading order testdir.makepyfile(testplugin0=""" import pytest @pytest.fixture def foo(): return 7 """) testdir.makepyfile(testplugin1=""" import pytest @pytest.fixture def foo(foo): return foo + 7 """) testdir.syspathinsert() testdir.makepyfile(""" pytest_plugins = ['testplugin0', 'testplugin1'] def test_foo(foo): assert foo == 
14 """) result = testdir.runpytest() assert result.ret == 0 def test_autouse_fixture_plugin(self, testdir): # A fixture from a plugin has no baseid set, which screwed up # the autouse fixture handling. testdir.makepyfile(testplugin=""" import pytest @pytest.fixture(autouse=True) def foo(request): request.function.foo = 7 """) testdir.syspathinsert() testdir.makepyfile(""" pytest_plugins = 'testplugin' def test_foo(request): assert request.function.foo == 7 """) result = testdir.runpytest() assert result.ret == 0 def test_funcarg_lookup_error(self, testdir): testdir.makepyfile(""" def test_lookup_error(unknown): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*ERROR*test_lookup_error*", "*def test_lookup_error(unknown):*", "*fixture*unknown*not found*", "*available fixtures*", "*1 error*", ]) assert "INTERNAL" not in result.stdout.str() class TestRequestBasic: def test_request_attributes(self, testdir): item = testdir.getitem(""" def pytest_funcarg__something(request): pass def test_func(something): pass """) req = funcargs.FixtureRequest(item) assert req.function == item.obj assert req.keywords == item.keywords assert hasattr(req.module, 'test_func') assert req.cls is None assert req.function.__name__ == "test_func" assert req.config == item.config assert repr(req).find(req.function.__name__) != -1 def test_request_attributes_method(self, testdir): item, = testdir.getitems(""" class TestB: def pytest_funcarg__something(self, request): return 1 def test_func(self, something): pass """) req = item._request assert req.cls.__name__ == "TestB" assert req.instance.__class__ == req.cls def XXXtest_request_contains_funcarg_arg2fixturedefs(self, testdir): modcol = testdir.getmodulecol(""" def pytest_funcarg__something(request): pass class TestClass: def test_method(self, something): pass """) item1, = testdir.genitems([modcol]) assert item1.name == "test_method" arg2fixturedefs = funcargs.FixtureRequest(item1)._arg2fixturedefs assert 
len(arg2fixturedefs) == 1 assert arg2fixturedefs[0].__name__ == "pytest_funcarg__something" def test_getfuncargvalue_recursive(self, testdir): testdir.makeconftest(""" def pytest_funcarg__something(request): return 1 """) testdir.makepyfile(""" def pytest_funcarg__something(request): return request.getfuncargvalue("something") + 1 def test_func(something): assert something == 2 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_getfuncargvalue(self, testdir): item = testdir.getitem(""" l = [2] def pytest_funcarg__something(request): return 1 def pytest_funcarg__other(request): return l.pop() def test_func(something): pass """) req = item._request pytest.raises(FixtureLookupError, req.getfuncargvalue, "notexists") val = req.getfuncargvalue("something") assert val == 1 val = req.getfuncargvalue("something") assert val == 1 val2 = req.getfuncargvalue("other") assert val2 == 2 val2 = req.getfuncargvalue("other") # see about caching assert val2 == 2 pytest._fillfuncargs(item) assert item.funcargs["something"] == 1 assert len(get_public_names(item.funcargs)) == 2 assert "request" in item.funcargs #assert item.funcargs == {'something': 1, "other": 2} def test_request_addfinalizer(self, testdir): item = testdir.getitem(""" teardownlist = [] def pytest_funcarg__something(request): request.addfinalizer(lambda: teardownlist.append(1)) def test_func(something): pass """) item.session._setupstate.prepare(item) pytest._fillfuncargs(item) # successively check finalization calls teardownlist = item.getparent(pytest.Module).obj.teardownlist ss = item.session._setupstate assert not teardownlist ss.teardown_exact(item, None) print(ss.stack) assert teardownlist == [1] def test_request_addfinalizer_failing_setup(self, testdir): testdir.makepyfile(""" import pytest l = [1] @pytest.fixture def myfix(request): request.addfinalizer(l.pop) assert 0 def test_fix(myfix): pass def test_finalizer_ran(): assert not l """) reprec = testdir.inline_run("-s") 
reprec.assertoutcome(failed=1, passed=1) def test_request_addfinalizer_failing_setup_module(self, testdir): testdir.makepyfile(""" import pytest l = [1, 2] @pytest.fixture(scope="module") def myfix(request): request.addfinalizer(l.pop) request.addfinalizer(l.pop) assert 0 def test_fix(myfix): pass """) reprec = testdir.inline_run("-s") mod = reprec.getcalls("pytest_runtest_setup")[0].item.module assert not mod.l def test_request_addfinalizer_partial_setup_failure(self, testdir): p = testdir.makepyfile(""" l = [] def pytest_funcarg__something(request): request.addfinalizer(lambda: l.append(None)) def test_func(something, missingarg): pass def test_second(): assert len(l) == 1 """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*1 error*" # XXX the whole module collection fails ]) def test_request_getmodulepath(self, testdir): modcol = testdir.getmodulecol("def test_somefunc(): pass") item, = testdir.genitems([modcol]) req = funcargs.FixtureRequest(item) assert req.fspath == modcol.fspath def test_request_fixturenames(self, testdir): testdir.makepyfile(""" import pytest from _pytest.pytester import get_public_names @pytest.fixture() def arg1(): pass @pytest.fixture() def farg(arg1): pass @pytest.fixture(autouse=True) def sarg(tmpdir): pass def test_function(request, farg): assert set(get_public_names(request.fixturenames)) == \ set(["tmpdir", "sarg", "arg1", "request", "farg"]) """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_funcargnames_compatattr(self, testdir): testdir.makepyfile(""" def pytest_generate_tests(metafunc): assert metafunc.funcargnames == metafunc.fixturenames def pytest_funcarg__fn(request): assert request._pyfuncitem.funcargnames == \ request._pyfuncitem.fixturenames return request.funcargnames, request.fixturenames def test_hello(fn): assert fn[0] == fn[1] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_setupdecorator_and_xunit(self, testdir): testdir.makepyfile(""" import pytest l 
= [] @pytest.fixture(scope='module', autouse=True) def setup_module(): l.append("module") @pytest.fixture(autouse=True) def setup_function(): l.append("function") def test_func(): pass class TestClass: @pytest.fixture(scope="class", autouse=True) def setup_class(self): l.append("class") @pytest.fixture(autouse=True) def setup_method(self): l.append("method") def test_method(self): pass def test_all(): assert l == ["module", "function", "class", "function", "method", "function"] """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=3) def test_fixtures_sub_subdir_normalize_sep(self, testdir): # this tests that normlization of nodeids takes place b = testdir.mkdir("tests").mkdir("unit") b.join("conftest.py").write(py.code.Source(""" def pytest_funcarg__arg1(): pass """)) p = b.join("test_module.py") p.write("def test_func(arg1): pass") result = testdir.runpytest(p, "--fixtures") assert result.ret == 0 result.stdout.fnmatch_lines(""" *fixtures defined*conftest* *arg1* """) def test_newstyle_with_request(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture() def arg(request): pass def test_1(arg): pass """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_setupcontext_no_param(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(params=[1,2]) def arg(request): return request.param @pytest.fixture(autouse=True) def mysetup(request, arg): assert not hasattr(request, "param") def test_1(arg): assert arg in (1,2) """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) class TestRequestMarking: def test_applymarker(self, testdir): item1,item2 = testdir.getitems(""" def pytest_funcarg__something(request): pass class TestClass: def test_func1(self, something): pass def test_func2(self, something): pass """) req1 = funcargs.FixtureRequest(item1) assert 'xfail' not in item1.keywords req1.applymarker(pytest.mark.xfail) assert 'xfail' in item1.keywords assert 'skipif' not in item1.keywords 
req1.applymarker(pytest.mark.skipif) assert 'skipif' in item1.keywords pytest.raises(ValueError, "req1.applymarker(42)") def test_accesskeywords(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture() def keywords(request): return request.keywords @pytest.mark.XYZ def test_function(keywords): assert keywords["XYZ"] assert "abc" not in keywords """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_accessmarker_dynamic(self, testdir): testdir.makeconftest(""" import pytest @pytest.fixture() def keywords(request): return request.keywords @pytest.fixture(scope="class", autouse=True) def marking(request): request.applymarker(pytest.mark.XYZ("hello")) """) testdir.makepyfile(""" import pytest def test_fun1(keywords): assert keywords["XYZ"] is not None assert "abc" not in keywords def test_fun2(keywords): assert keywords["XYZ"] is not None assert "abc" not in keywords """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) class TestRequestCachedSetup: def test_request_cachedsetup_defaultmodule(self, testdir): reprec = testdir.inline_runsource(""" mysetup = ["hello",].pop def pytest_funcarg__something(request): return request.cached_setup(mysetup, scope="module") def test_func1(something): assert something == "hello" class TestClass: def test_func1a(self, something): assert something == "hello" """) reprec.assertoutcome(passed=2) def test_request_cachedsetup_class(self, testdir): reprec = testdir.inline_runsource(""" mysetup = ["hello", "hello2", "hello3"].pop def pytest_funcarg__something(request): return request.cached_setup(mysetup, scope="class") def test_func1(something): assert something == "hello3" def test_func2(something): assert something == "hello2" class TestClass: def test_func1a(self, something): assert something == "hello" def test_func2b(self, something): assert something == "hello" """) reprec.assertoutcome(passed=4) def test_request_cachedsetup_extrakey(self, testdir): item1 = testdir.getitem("def 
test_func(): pass") req1 = funcargs.FixtureRequest(item1) l = ["hello", "world"] def setup(): return l.pop() ret1 = req1.cached_setup(setup, extrakey=1) ret2 = req1.cached_setup(setup, extrakey=2) assert ret2 == "hello" assert ret1 == "world" ret1b = req1.cached_setup(setup, extrakey=1) ret2b = req1.cached_setup(setup, extrakey=2) assert ret1 == ret1b assert ret2 == ret2b def test_request_cachedsetup_cache_deletion(self, testdir): item1 = testdir.getitem("def test_func(): pass") req1 = funcargs.FixtureRequest(item1) l = [] def setup(): l.append("setup") def teardown(val): l.append("teardown") req1.cached_setup(setup, teardown, scope="function") assert l == ['setup'] # artificial call of finalizer setupstate = req1._pyfuncitem.session._setupstate setupstate._callfinalizers(item1) assert l == ["setup", "teardown"] req1.cached_setup(setup, teardown, scope="function") assert l == ["setup", "teardown", "setup"] setupstate._callfinalizers(item1) assert l == ["setup", "teardown", "setup", "teardown"] def test_request_cached_setup_two_args(self, testdir): testdir.makepyfile(""" def pytest_funcarg__arg1(request): return request.cached_setup(lambda: 42) def pytest_funcarg__arg2(request): return request.cached_setup(lambda: 17) def test_two_different_setups(arg1, arg2): assert arg1 != arg2 """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines([ "*1 passed*" ]) def test_request_cached_setup_getfuncargvalue(self, testdir): testdir.makepyfile(""" def pytest_funcarg__arg1(request): arg1 = request.getfuncargvalue("arg2") return request.cached_setup(lambda: arg1 + 1) def pytest_funcarg__arg2(request): return request.cached_setup(lambda: 10) def test_two_funcarg(arg1): assert arg1 == 11 """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines([ "*1 passed*" ]) def test_request_cached_setup_functional(self, testdir): testdir.makepyfile(test_0=""" l = [] def pytest_funcarg__something(request): val = request.cached_setup(fsetup, fteardown) return val def 
fsetup(mycache=[1]): l.append(mycache.pop()) return l def fteardown(something): l.remove(something[0]) l.append(2) def test_list_once(something): assert something == [1] def test_list_twice(something): assert something == [1] """) testdir.makepyfile(test_1=""" import test_0 # should have run already def test_check_test0_has_teardown_correct(): assert test_0.l == [2] """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines([ "*3 passed*" ]) def test_issue117_sessionscopeteardown(self, testdir): testdir.makepyfile(""" def pytest_funcarg__app(request): app = request.cached_setup( scope='session', setup=lambda: 0, teardown=lambda x: 3/x) return app def test_func(app): pass """) result = testdir.runpytest() assert result.ret != 0 result.stdout.fnmatch_lines([ "*3/x*", "*ZeroDivisionError*", ]) class TestFixtureUsages: def test_noargfixturedec(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture def arg1(): return 1 def test_func(arg1): assert arg1 == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_receives_funcargs(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture() def arg1(): return 1 @pytest.fixture() def arg2(arg1): return arg1 + 1 def test_add(arg2): assert arg2 == 2 def test_all(arg1, arg2): assert arg1 == 1 assert arg2 == 2 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_receives_funcargs_scope_mismatch(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(scope="function") def arg1(): return 1 @pytest.fixture(scope="module") def arg2(arg1): return arg1 + 1 def test_add(arg2): assert arg2 == 2 """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*ScopeMismatch*involved factories*", "* def arg2*", "* def arg1*", "*1 error*" ]) def test_funcarg_parametrized_and_used_twice(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(params=[1,2]) def arg1(request): l.append(1) return request.param @pytest.fixture() def 
arg2(arg1): return arg1 + 1 def test_add(arg1, arg2): assert arg2 == arg1 + 1 assert len(l) == arg1 """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*2 passed*" ]) def test_factory_uses_unknown_funcarg_as_dependency_error(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture() def fail(missing): return @pytest.fixture() def call_fail(fail): return def test_missing(call_fail): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines(""" *pytest.fixture()* *def call_fail(fail)* *pytest.fixture()* *def fail* *fixture*'missing'*not found* """) def test_factory_setup_as_classes_fails(self, testdir): testdir.makepyfile(""" import pytest class arg1: def __init__(self, request): self.x = 1 arg1 = pytest.fixture()(arg1) """) reprec = testdir.inline_run() l = reprec.getfailedcollections() assert len(l) == 1 def test_request_can_be_overridden(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture() def request(request): request.a = 1 return request def test_request(request): assert request.a == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_usefixtures_marker(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(scope="class") def myfix(request): request.cls.hello = "world" l.append(1) class TestClass: def test_one(self): assert self.hello == "world" assert len(l) == 1 def test_two(self): assert self.hello == "world" assert len(l) == 1 pytest.mark.usefixtures("myfix")(TestClass) """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_usefixtures_ini(self, testdir): testdir.makeini(""" [pytest] usefixtures = myfix """) testdir.makeconftest(""" import pytest @pytest.fixture(scope="class") def myfix(request): request.cls.hello = "world" """) testdir.makepyfile(""" class TestClass: def test_one(self): assert self.hello == "world" def test_two(self): assert self.hello == "world" """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def 
test_usefixtures_seen_in_showmarkers(self, testdir): result = testdir.runpytest("--markers") result.stdout.fnmatch_lines(""" *usefixtures(fixturename1*mark tests*fixtures* """) def test_request_instance_issue203(self, testdir): testdir.makepyfile(""" import pytest class TestClass: @pytest.fixture def setup1(self, request): assert self == request.instance self.arg1 = 1 def test_hello(self, setup1): assert self.arg1 == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_fixture_parametrized_with_iterator(self, testdir): testdir.makepyfile(""" import pytest l = [] def f(): yield 1 yield 2 dec = pytest.fixture(scope="module", params=f()) @dec def arg(request): return request.param @dec def arg2(request): return request.param def test_1(arg): l.append(arg) def test_2(arg2): l.append(arg2*10) """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=4) l = reprec.getcalls("pytest_runtest_call")[0].item.module.l assert l == [1,2, 10,20] class TestFixtureManagerParseFactories: def pytest_funcarg__testdir(self, request): testdir = request.getfuncargvalue("testdir") testdir.makeconftest(""" def pytest_funcarg__hello(request): return "conftest" def pytest_funcarg__fm(request): return request._fixturemanager def pytest_funcarg__item(request): return request._pyfuncitem """) return testdir def test_parsefactories_evil_objects_issue214(self, testdir): testdir.makepyfile(""" class A: def __call__(self): pass def __getattr__(self, name): raise RuntimeError() a = A() def test_hello(): pass """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1, failed=0) def test_parsefactories_conftest(self, testdir): testdir.makepyfile(""" def test_hello(item, fm): for name in ("fm", "hello", "item"): faclist = fm.getfixturedefs(name, item.nodeid) assert len(faclist) == 1 fac = faclist[0] assert fac.func.__name__ == "pytest_funcarg__" + name """) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) def 
test_parsefactories_conftest_and_module_and_class(self, testdir): testdir.makepyfile(""" def pytest_funcarg__hello(request): return "module" class TestClass: def pytest_funcarg__hello(self, request): return "class" def test_hello(self, item, fm): faclist = fm.getfixturedefs("hello", item.nodeid) print (faclist) assert len(faclist) == 3 assert faclist[0].func(item._request) == "conftest" assert faclist[1].func(item._request) == "module" assert faclist[2].func(item._request) == "class" """) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) class TestAutouseDiscovery: def pytest_funcarg__testdir(self, testdir): testdir.makeconftest(""" import pytest @pytest.fixture(autouse=True) def perfunction(request, tmpdir): pass @pytest.fixture() def arg1(tmpdir): pass @pytest.fixture(autouse=True) def perfunction2(arg1): pass def pytest_funcarg__fm(request): return request._fixturemanager def pytest_funcarg__item(request): return request._pyfuncitem """) return testdir def test_parsefactories_conftest(self, testdir): testdir.makepyfile(""" from _pytest.pytester import get_public_names def test_check_setup(item, fm): autousenames = fm._getautousenames(item.nodeid) assert len(get_public_names(autousenames)) == 2 assert "perfunction2" in autousenames assert "perfunction" in autousenames """) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) def test_two_classes_separated_autouse(self, testdir): testdir.makepyfile(""" import pytest class TestA: l = [] @pytest.fixture(autouse=True) def setup1(self): self.l.append(1) def test_setup1(self): assert self.l == [1] class TestB: l = [] @pytest.fixture(autouse=True) def setup2(self): self.l.append(1) def test_setup2(self): assert self.l == [1] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_setup_at_classlevel(self, testdir): testdir.makepyfile(""" import pytest class TestClass: @pytest.fixture(autouse=True) def permethod(self, request): request.instance.funcname = 
request.function.__name__ def test_method1(self): assert self.funcname == "test_method1" def test_method2(self): assert self.funcname == "test_method2" """) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) @pytest.mark.xfail(reason="'enabled' feature not implemented") def test_setup_enabled_functionnode(self, testdir): testdir.makepyfile(""" import pytest def enabled(parentnode, markers): return "needsdb" in markers @pytest.fixture(params=[1,2]) def db(request): return request.param @pytest.fixture(enabled=enabled, autouse=True) def createdb(db): pass def test_func1(request): assert "db" not in request.fixturenames @pytest.mark.needsdb def test_func2(request): assert "db" in request.fixturenames """) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) def test_callables_nocode(self, testdir): """ a imported mock.call would break setup/factory discovery due to it being callable and __code__ not being a code object """ testdir.makepyfile(""" class _call(tuple): def __call__(self, *k, **kw): pass def __getattr__(self, k): return self call = _call() """) reprec = testdir.inline_run("-s") reprec.assertoutcome(failed=0, passed=0) def test_autouse_in_conftests(self, testdir): a = testdir.mkdir("a") b = testdir.mkdir("a1") conftest = testdir.makeconftest(""" import pytest @pytest.fixture(autouse=True) def hello(): xxx """) conftest.move(a.join(conftest.basename)) a.join("test_something.py").write("def test_func(): pass") b.join("test_otherthing.py").write("def test_func(): pass") result = testdir.runpytest() result.stdout.fnmatch_lines(""" *1 passed*1 error* """) def test_autouse_in_module_and_two_classes(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(autouse=True) def append1(): l.append("module") def test_x(): assert l == ["module"] class TestA: @pytest.fixture(autouse=True) def append2(self): l.append("A") def test_hello(self): assert l == ["module", "module", "A"], l class TestA2: def test_world(self): assert l 
== ["module", "module", "A", "module"], l """) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) class TestAutouseManagement: def test_autouse_conftest_mid_directory(self, testdir): pkgdir = testdir.mkpydir("xyz123") pkgdir.join("conftest.py").write(py.code.Source(""" import pytest @pytest.fixture(autouse=True) def app(): import sys sys._myapp = "hello" """)) t = pkgdir.ensure("tests", "test_app.py") t.write(py.code.Source(""" import sys def test_app(): assert sys._myapp == "hello" """)) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) def test_autouse_honored_for_yield(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(autouse=True) def tst(): global x x = 3 def test_gen(): def f(hello): assert x == abs(hello) yield f, 3 yield f, -3 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_funcarg_and_setup(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(scope="module") def arg(): l.append(1) return 0 @pytest.fixture(scope="module", autouse=True) def something(arg): l.append(2) def test_hello(arg): assert len(l) == 2 assert l == [1,2] assert arg == 0 def test_hello2(arg): assert len(l) == 2 assert l == [1,2] assert arg == 0 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_uses_parametrized_resource(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(params=[1,2]) def arg(request): return request.param @pytest.fixture(autouse=True) def something(arg): l.append(arg) def test_hello(): if len(l) == 1: assert l == [1] elif len(l) == 2: assert l == [1, 2] else: 0/0 """) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) def test_session_parametrized_function(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(scope="session", params=[1,2]) def arg(request): return request.param @pytest.fixture(scope="function", autouse=True) def append(request, arg): if request.function.__name__ == "test_some": 
l.append(arg) def test_some(): pass def test_result(arg): assert len(l) == arg assert l[:arg] == [1,2][:arg] """) reprec = testdir.inline_run("-v", "-s") reprec.assertoutcome(passed=4) def test_class_function_parametrization_finalization(self, testdir): p = testdir.makeconftest(""" import pytest import pprint l = [] @pytest.fixture(scope="function", params=[1,2]) def farg(request): return request.param @pytest.fixture(scope="class", params=list("ab")) def carg(request): return request.param @pytest.fixture(scope="function", autouse=True) def append(request, farg, carg): def fin(): l.append("fin_%s%s" % (carg, farg)) request.addfinalizer(fin) """) testdir.makepyfile(""" import pytest class TestClass: def test_1(self): pass class TestClass2: def test_2(self): pass """) reprec = testdir.inline_run("-v","-s") reprec.assertoutcome(passed=8) config = reprec.getcalls("pytest_unconfigure")[0].config l = config._conftest.getconftestmodules(p)[0].l assert l == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2 def test_scope_ordering(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(scope="function", autouse=True) def fappend2(): l.append(2) @pytest.fixture(scope="class", autouse=True) def classappend3(): l.append(3) @pytest.fixture(scope="module", autouse=True) def mappend(): l.append(1) class TestHallo: def test_method(self): assert l == [1,3,2] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_parametrization_setup_teardown_ordering(self, testdir): testdir.makepyfile(""" import pytest l = [] def pytest_generate_tests(metafunc): if metafunc.cls is not None: metafunc.parametrize("item", [1,2], scope="class") class TestClass: @pytest.fixture(scope="class", autouse=True) def addteardown(self, item, request): l.append("setup-%d" % item) request.addfinalizer(lambda: l.append("teardown-%d" % item)) def test_step1(self, item): l.append("step1-%d" % item) def test_step2(self, item): l.append("step2-%d" % item) def test_finish(): print 
(l) assert l == ["setup-1", "step1-1", "step2-1", "teardown-1", "setup-2", "step1-2", "step2-2", "teardown-2",] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=5) def test_ordering_autouse_before_explicit(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(autouse=True) def fix1(): l.append(1) @pytest.fixture() def arg1(): l.append(2) def test_hello(arg1): assert l == [1,2] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @pytest.mark.issue226 @pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00","p01"]) @pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10","p11"]) def test_ordering_dependencies_torndown_first(self, testdir, param1, param2): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(%(param1)s) def arg1(request): request.addfinalizer(lambda: l.append("fin1")) l.append("new1") @pytest.fixture(%(param2)s) def arg2(request, arg1): request.addfinalizer(lambda: l.append("fin2")) l.append("new2") def test_arg(arg2): pass def test_check(): assert l == ["new1", "new2", "fin2", "fin1"] """ % locals()) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) class TestFixtureMarker: def test_parametrize(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(params=["a", "b", "c"]) def arg(request): return request.param l = [] def test_param(arg): l.append(arg) def test_result(): assert l == list("abc") """) reprec = testdir.inline_run() reprec.assertoutcome(passed=4) def test_scope_session(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(scope="module") def arg(): l.append(1) return 1 def test_1(arg): assert arg == 1 def test_2(arg): assert arg == 1 assert len(l) == 1 class TestClass: def test3(self, arg): assert arg == 1 assert len(l) == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) def test_scope_module_uses_session(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(scope="module") 
def arg(): l.append(1) return 1 def test_1(arg): assert arg == 1 def test_2(arg): assert arg == 1 assert len(l) == 1 class TestClass: def test3(self, arg): assert arg == 1 assert len(l) == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) def test_scope_module_and_finalizer(self, testdir): testdir.makeconftest(""" import pytest finalized = [] created = [] @pytest.fixture(scope="module") def arg(request): created.append(1) assert request.scope == "module" request.addfinalizer(lambda: finalized.append(1)) def pytest_funcarg__created(request): return len(created) def pytest_funcarg__finalized(request): return len(finalized) """) testdir.makepyfile( test_mod1=""" def test_1(arg, created, finalized): assert created == 1 assert finalized == 0 def test_2(arg, created, finalized): assert created == 1 assert finalized == 0""", test_mod2=""" def test_3(arg, created, finalized): assert created == 2 assert finalized == 1""", test_mode3=""" def test_4(arg, created, finalized): assert created == 3 assert finalized == 2 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=4) @pytest.mark.parametrize("method", [ 'request.getfuncargvalue("arg")', 'request.cached_setup(lambda: None, scope="function")', ], ids=["getfuncargvalue", "cached_setup"]) def test_scope_mismatch_various(self, testdir, method): testdir.makeconftest(""" import pytest finalized = [] created = [] @pytest.fixture(scope="function") def arg(request): pass """) testdir.makepyfile( test_mod1=""" import pytest @pytest.fixture(scope="session") def arg(request): %s def test_1(arg): pass """ % method) result = testdir.runpytest() assert result.ret != 0 result.stdout.fnmatch_lines([ "*ScopeMismatch*You tried*function*session*request*", ]) def test_register_only_with_mark(self, testdir): testdir.makeconftest(""" import pytest @pytest.fixture() def arg(): return 1 """) testdir.makepyfile( test_mod1=""" import pytest @pytest.fixture() def arg(arg): return arg + 1 def test_1(arg): assert arg == 2 
""") reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_parametrize_and_scope(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(scope="module", params=["a", "b", "c"]) def arg(request): return request.param l = [] def test_param(arg): l.append(arg) """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=3) l = reprec.getcalls("pytest_runtest_call")[0].item.module.l assert len(l) == 3 assert "a" in l assert "b" in l assert "c" in l def test_scope_mismatch(self, testdir): testdir.makeconftest(""" import pytest @pytest.fixture(scope="function") def arg(request): pass """) testdir.makepyfile(""" import pytest @pytest.fixture(scope="session") def arg(arg): pass def test_mismatch(arg): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*ScopeMismatch*", "*1 error*", ]) def test_parametrize_separated_order(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(scope="module", params=[1, 2]) def arg(request): return request.param l = [] def test_1(arg): l.append(arg) def test_2(arg): l.append(arg) """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=4) l = reprec.getcalls("pytest_runtest_call")[0].item.module.l assert l == [1,1,2,2] def test_module_parametrized_ordering(self, testdir): testdir.makeconftest(""" import pytest @pytest.fixture(scope="session", params="s1 s2".split()) def sarg(): pass @pytest.fixture(scope="module", params="m1 m2".split()) def marg(): pass """) testdir.makepyfile(test_mod1=""" def test_func(sarg): pass def test_func1(marg): pass """, test_mod2=""" def test_func2(sarg): pass def test_func3(sarg, marg): pass def test_func3b(sarg, marg): pass def test_func4(marg): pass """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines(""" test_mod1.py:1: test_func[s1] PASSED test_mod2.py:1: test_func2[s1] PASSED test_mod2.py:3: test_func3[s1-m1] PASSED test_mod2.py:5: test_func3b[s1-m1] PASSED test_mod2.py:3: test_func3[s1-m2] PASSED test_mod2.py:5: 
test_func3b[s1-m2] PASSED test_mod1.py:1: test_func[s2] PASSED test_mod2.py:1: test_func2[s2] PASSED test_mod2.py:3: test_func3[s2-m1] PASSED test_mod2.py:5: test_func3b[s2-m1] PASSED test_mod2.py:7: test_func4[m1] PASSED test_mod2.py:3: test_func3[s2-m2] PASSED test_mod2.py:5: test_func3b[s2-m2] PASSED test_mod2.py:7: test_func4[m2] PASSED test_mod1.py:3: test_func1[m1] PASSED test_mod1.py:3: test_func1[m2] PASSED """) def test_class_ordering(self, testdir): testdir.makeconftest(""" import pytest l = [] @pytest.fixture(scope="function", params=[1,2]) def farg(request): return request.param @pytest.fixture(scope="class", params=list("ab")) def carg(request): return request.param @pytest.fixture(scope="function", autouse=True) def append(request, farg, carg): def fin(): l.append("fin_%s%s" % (carg, farg)) request.addfinalizer(fin) """) testdir.makepyfile(""" import pytest class TestClass2: def test_1(self): pass def test_2(self): pass class TestClass: def test_3(self): pass """) result = testdir.runpytest("-vs") result.stdout.fnmatch_lines(""" test_class_ordering.py:4: TestClass2.test_1[1-a] PASSED test_class_ordering.py:4: TestClass2.test_1[2-a] PASSED test_class_ordering.py:6: TestClass2.test_2[1-a] PASSED test_class_ordering.py:6: TestClass2.test_2[2-a] PASSED test_class_ordering.py:4: TestClass2.test_1[1-b] PASSED test_class_ordering.py:4: TestClass2.test_1[2-b] PASSED test_class_ordering.py:6: TestClass2.test_2[1-b] PASSED test_class_ordering.py:6: TestClass2.test_2[2-b] PASSED test_class_ordering.py:9: TestClass.test_3[1-a] PASSED test_class_ordering.py:9: TestClass.test_3[2-a] PASSED test_class_ordering.py:9: TestClass.test_3[1-b] PASSED test_class_ordering.py:9: TestClass.test_3[2-b] PASSED """) def test_parametrize_separated_order_higher_scope_first(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(scope="function", params=[1, 2]) def arg(request): param = request.param request.addfinalizer(lambda: l.append("fin:%s" % param)) 
l.append("create:%s" % param) return request.param @pytest.fixture(scope="module", params=["mod1", "mod2"]) def modarg(request): param = request.param request.addfinalizer(lambda: l.append("fin:%s" % param)) l.append("create:%s" % param) return request.param l = [] def test_1(arg): l.append("test1") def test_2(modarg): l.append("test2") def test_3(arg, modarg): l.append("test3") def test_4(modarg, arg): l.append("test4") """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=12) l = reprec.getcalls("pytest_runtest_call")[0].item.module.l expected = [ 'create:1', 'test1', 'fin:1', 'create:2', 'test1', 'fin:2', 'create:mod1', 'test2', 'create:1', 'test3', 'fin:1', 'create:2', 'test3', 'fin:2', 'create:1', 'test4', 'fin:1', 'create:2', 'test4', 'fin:2', 'fin:mod1', 'create:mod2', 'test2', 'create:1', 'test3', 'fin:1', 'create:2', 'test3', 'fin:2', 'create:1', 'test4', 'fin:1', 'create:2', 'test4', 'fin:2', 'fin:mod2'] import pprint pprint.pprint(list(zip(l, expected))) assert l == expected def test_parametrized_fixture_teardown_order(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(params=[1,2], scope="class") def param1(request): return request.param l = [] class TestClass: @classmethod @pytest.fixture(scope="class", autouse=True) def setup1(self, request, param1): l.append(1) request.addfinalizer(self.teardown1) @classmethod def teardown1(self): assert l.pop() == 1 @pytest.fixture(scope="class", autouse=True) def setup2(self, request, param1): l.append(2) request.addfinalizer(self.teardown2) @classmethod def teardown2(self): assert l.pop() == 2 def test(self): pass def test_finish(): assert not l """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines(""" *3 passed* """) assert "error" not in result.stdout.str() def test_fixture_finalizer(self, testdir): testdir.makeconftest(""" import pytest import sys @pytest.fixture def browser(request): def finalize(): sys.stdout.write('Finalized') request.addfinalizer(finalize) return 
{} """) b = testdir.mkdir("subdir") b.join("test_overriden_fixture_finalizer.py").write(dedent(""" import pytest @pytest.fixture def browser(browser): browser['visited'] = True return browser def test_browser(browser): assert browser['visited'] is True """)) reprec = testdir.runpytest("-s") for test in ['test_browser']: reprec.stdout.fnmatch_lines('*Finalized*') def test_class_scope_with_normal_tests(self, testdir): testpath = testdir.makepyfile(""" import pytest class Box: value = 0 @pytest.fixture(scope='class') def a(request): Box.value += 1 return Box.value def test_a(a): assert a == 1 class Test1: def test_b(self, a): assert a == 2 class Test2: def test_c(self, a): assert a == 3""") reprec = testdir.inline_run(testpath) for test in ['test_a', 'test_b', 'test_c']: assert reprec.matchreport(test).passed def test_request_is_clean(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(params=[1, 2]) def fix(request): request.addfinalizer(lambda: l.append(request.param)) def test_fix(fix): pass """) reprec = testdir.inline_run("-s") l = reprec.getcalls("pytest_runtest_call")[0].item.module.l assert l == [1,2] def test_parametrize_separated_lifecycle(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(scope="module", params=[1, 2]) def arg(request): x = request.param request.addfinalizer(lambda: l.append("fin%s" % x)) return request.param def test_1(arg): l.append(arg) def test_2(arg): l.append(arg) """) reprec = testdir.inline_run("-vs") reprec.assertoutcome(passed=4) l = reprec.getcalls("pytest_runtest_call")[0].item.module.l import pprint pprint.pprint(l) #assert len(l) == 6 assert l[0] == l[1] == 1 assert l[2] == "fin1" assert l[3] == l[4] == 2 assert l[5] == "fin2" def test_parametrize_function_scoped_finalizers_called(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(scope="function", params=[1, 2]) def arg(request): x = request.param request.addfinalizer(lambda: l.append("fin%s" % x)) return 
request.param l = [] def test_1(arg): l.append(arg) def test_2(arg): l.append(arg) def test_3(): assert len(l) == 8 assert l == [1, "fin1", 2, "fin2", 1, "fin1", 2, "fin2"] """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=5) @pytest.mark.issue246 @pytest.mark.parametrize("scope", ["session", "function", "module"]) def test_finalizer_order_on_parametrization(self, scope, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(scope=%(scope)r, params=["1"]) def fix1(request): return request.param @pytest.fixture(scope=%(scope)r) def fix2(request, base): def cleanup_fix2(): assert not l, "base should not have been finalized" request.addfinalizer(cleanup_fix2) @pytest.fixture(scope=%(scope)r) def base(request, fix1): def cleanup_base(): l.append("fin_base") print ("finalizing base") request.addfinalizer(cleanup_base) def test_begin(): pass def test_baz(base, fix2): pass def test_other(): pass """ % {"scope": scope}) reprec = testdir.inline_run("-lvs") reprec.assertoutcome(passed=3) @pytest.mark.issue396 def test_class_scope_parametrization_ordering(self, testdir): testdir.makepyfile(""" import pytest l = [] @pytest.fixture(params=["John", "Doe"], scope="class") def human(request): request.addfinalizer(lambda: l.append("fin %s" % request.param)) return request.param class TestGreetings: def test_hello(self, human): l.append("test_hello") class TestMetrics: def test_name(self, human): l.append("test_name") def test_population(self, human): l.append("test_population") """) reprec = testdir.inline_run() reprec.assertoutcome(passed=6) l = reprec.getcalls("pytest_runtest_call")[0].item.module.l assert l == ["test_hello", "fin John", "test_hello", "fin Doe", "test_name", "test_population", "fin John", "test_name", "test_population", "fin Doe"] def test_parametrize_setup_function(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(scope="module", params=[1, 2]) def arg(request): return request.param 
@pytest.fixture(scope="module", autouse=True) def mysetup(request, arg): request.addfinalizer(lambda: l.append("fin%s" % arg)) l.append("setup%s" % arg) l = [] def test_1(arg): l.append(arg) def test_2(arg): l.append(arg) def test_3(): import pprint pprint.pprint(l) if arg == 1: assert l == ["setup1", 1, 1, ] elif arg == 2: assert l == ["setup1", 1, 1, "fin1", "setup2", 2, 2, ] """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=6) def test_fixture_marked_function_not_collected_as_test(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture def test_app(): return 1 def test_something(test_app): assert test_app == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_params_and_ids(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(params=[object(), object()], ids=['alpha', 'beta']) def fix(request): return request.param def test_foo(fix): assert 1 """) res = testdir.runpytest('-v') res.stdout.fnmatch_lines([ '*test_foo*alpha*', '*test_foo*beta*']) def test_params_and_ids_yieldfixture(self, testdir): testdir.makepyfile(""" import pytest @pytest.yield_fixture(params=[object(), object()], ids=['alpha', 'beta']) def fix(request): yield request.param def test_foo(fix): assert 1 """) res = testdir.runpytest('-v') res.stdout.fnmatch_lines([ '*test_foo*alpha*', '*test_foo*beta*']) class TestRequestScopeAccess: pytestmark = pytest.mark.parametrize(("scope", "ok", "error"),[ ["session", "", "fspath class function module"], ["module", "module fspath", "cls function"], ["class", "module fspath cls", "function"], ["function", "module fspath cls function", ""] ]) def test_setup(self, testdir, scope, ok, error): testdir.makepyfile(""" import pytest @pytest.fixture(scope=%r, autouse=True) def myscoped(request): for x in %r: assert hasattr(request, x) for x in %r: pytest.raises(AttributeError, lambda: getattr(request, x)) assert request.session assert request.config def test_func(): pass """ %(scope, ok.split(), 
error.split())) reprec = testdir.inline_run("-l") reprec.assertoutcome(passed=1) def test_funcarg(self, testdir, scope, ok, error): testdir.makepyfile(""" import pytest @pytest.fixture(scope=%r) def arg(request): for x in %r: assert hasattr(request, x) for x in %r: pytest.raises(AttributeError, lambda: getattr(request, x)) assert request.session assert request.config def test_func(arg): pass """ %(scope, ok.split(), error.split())) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) class TestErrors: def test_subfactory_missing_funcarg(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture() def gen(qwe123): return 1 def test_something(gen): pass """) result = testdir.runpytest() assert result.ret != 0 result.stdout.fnmatch_lines([ "*def gen(qwe123):*", "*fixture*qwe123*not found*", "*1 error*", ]) def test_setupfunc_missing_funcarg(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(autouse=True) def gen(qwe123): return 1 def test_something(): pass """) result = testdir.runpytest() assert result.ret != 0 result.stdout.fnmatch_lines([ "*def gen(qwe123):*", "*fixture*qwe123*not found*", "*1 error*", ]) class TestShowFixtures: def test_funcarg_compat(self, testdir): config = testdir.parseconfigure("--funcargs") assert config.option.showfixtures def test_show_fixtures(self, testdir): result = testdir.runpytest("--fixtures") result.stdout.fnmatch_lines([ "*tmpdir*", "*temporary directory*", ] ) def test_show_fixtures_verbose(self, testdir): result = testdir.runpytest("--fixtures", "-v") result.stdout.fnmatch_lines([ "*tmpdir*--*tmpdir.py*", "*temporary directory*", ] ) def test_show_fixtures_testmodule(self, testdir): p = testdir.makepyfile(''' import pytest @pytest.fixture def _arg0(): """ hidden """ @pytest.fixture def arg1(): """ hello world """ ''') result = testdir.runpytest("--fixtures", p) result.stdout.fnmatch_lines(""" *tmpdir *fixtures defined from* *arg1* *hello world* """) assert "arg0" not in result.stdout.str() 
@pytest.mark.parametrize("testmod", [True, False]) def test_show_fixtures_conftest(self, testdir, testmod): testdir.makeconftest(''' import pytest @pytest.fixture def arg1(): """ hello world """ ''') if testmod: testdir.makepyfile(""" def test_hello(): pass """) result = testdir.runpytest("--fixtures") result.stdout.fnmatch_lines(""" *tmpdir* *fixtures defined from*conftest* *arg1* *hello world* """) class TestContextManagerFixtureFuncs: def test_simple(self, testdir): testdir.makepyfile(""" import pytest @pytest.yield_fixture def arg1(): print ("setup") yield 1 print ("teardown") def test_1(arg1): print ("test1 %s" % arg1) def test_2(arg1): print ("test2 %s" % arg1) assert 0 """) result = testdir.runpytest("-s") result.stdout.fnmatch_lines(""" *setup* *test1 1* *teardown* *setup* *test2 1* *teardown* """) def test_scoped(self, testdir): testdir.makepyfile(""" import pytest @pytest.yield_fixture(scope="module") def arg1(): print ("setup") yield 1 print ("teardown") def test_1(arg1): print ("test1 %s" % arg1) def test_2(arg1): print ("test2 %s" % arg1) """) result = testdir.runpytest("-s") result.stdout.fnmatch_lines(""" *setup* *test1 1* *test2 1* *teardown* """) def test_setup_exception(self, testdir): testdir.makepyfile(""" import pytest @pytest.yield_fixture(scope="module") def arg1(): pytest.fail("setup") yield 1 def test_1(arg1): pass """) result = testdir.runpytest("-s") result.stdout.fnmatch_lines(""" *pytest.fail*setup* *1 error* """) def test_teardown_exception(self, testdir): testdir.makepyfile(""" import pytest @pytest.yield_fixture(scope="module") def arg1(): yield 1 pytest.fail("teardown") def test_1(arg1): pass """) result = testdir.runpytest("-s") result.stdout.fnmatch_lines(""" *pytest.fail*teardown* *1 passed*1 error* """) def test_yields_more_than_one(self, testdir): testdir.makepyfile(""" import pytest @pytest.yield_fixture(scope="module") def arg1(): yield 1 yield 2 def test_1(arg1): pass """) result = testdir.runpytest("-s") 
result.stdout.fnmatch_lines(""" *fixture function* *test_yields*:2* """) def test_no_yield(self, testdir): testdir.makepyfile(""" import pytest @pytest.yield_fixture(scope="module") def arg1(): return 1 def test_1(arg1): pass """) result = testdir.runpytest("-s") result.stdout.fnmatch_lines(""" *yield_fixture*requires*yield* *yield_fixture* *def arg1* """) def test_yield_not_allowed_in_non_yield(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(scope="module") def arg1(): yield 1 def test_1(arg1): pass """) result = testdir.runpytest("-s") result.stdout.fnmatch_lines(""" *fixture*cannot use*yield* *def arg1* """) pytest-2.5.1/testing/python/metafunc.py0000664000175000017500000007160012254002202017602 0ustar hpkhpk00000000000000 import pytest, py from _pytest import python as funcargs class TestMetafunc: def Metafunc(self, func): # the unit tests of this class check if things work correctly # on the funcarg level, so we don't need a full blown # initiliazation class FixtureInfo: name2fixturedefs = None def __init__(self, names): self.names_closure = names names = funcargs.getfuncargnames(func) fixtureinfo = FixtureInfo(names) return funcargs.Metafunc(func, fixtureinfo, None) def test_no_funcargs(self, testdir): def function(): pass metafunc = self.Metafunc(function) assert not metafunc.fixturenames repr(metafunc._calls) def test_function_basic(self): def func(arg1, arg2="qwe"): pass metafunc = self.Metafunc(func) assert len(metafunc.fixturenames) == 1 assert 'arg1' in metafunc.fixturenames assert metafunc.function is func assert metafunc.cls is None def test_addcall_no_args(self): def func(arg1): pass metafunc = self.Metafunc(func) metafunc.addcall() assert len(metafunc._calls) == 1 call = metafunc._calls[0] assert call.id == "0" assert not hasattr(call, 'param') def test_addcall_id(self): def func(arg1): pass metafunc = self.Metafunc(func) pytest.raises(ValueError, "metafunc.addcall(id=None)") metafunc.addcall(id=1) pytest.raises(ValueError, 
"metafunc.addcall(id=1)") pytest.raises(ValueError, "metafunc.addcall(id='1')") metafunc.addcall(id=2) assert len(metafunc._calls) == 2 assert metafunc._calls[0].id == "1" assert metafunc._calls[1].id == "2" def test_addcall_param(self): def func(arg1): pass metafunc = self.Metafunc(func) class obj: pass metafunc.addcall(param=obj) metafunc.addcall(param=obj) metafunc.addcall(param=1) assert len(metafunc._calls) == 3 assert metafunc._calls[0].getparam("arg1") == obj assert metafunc._calls[1].getparam("arg1") == obj assert metafunc._calls[2].getparam("arg1") == 1 def test_addcall_funcargs(self): def func(x): pass metafunc = self.Metafunc(func) class obj: pass metafunc.addcall(funcargs={"x": 2}) metafunc.addcall(funcargs={"x": 3}) pytest.raises(pytest.fail.Exception, "metafunc.addcall({'xyz': 0})") assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {'x': 2} assert metafunc._calls[1].funcargs == {'x': 3} assert not hasattr(metafunc._calls[1], 'param') def test_parametrize_error(self): def func(x, y): pass metafunc = self.Metafunc(func) metafunc.parametrize("x", [1,2]) pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6])) pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6])) metafunc.parametrize("y", [1,2]) pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6])) pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6])) def test_parametrize_and_id(self): def func(x, y): pass metafunc = self.Metafunc(func) metafunc.parametrize("x", [1,2], ids=['basic', 'advanced']) metafunc.parametrize("y", ["abc", "def"]) ids = [x.id for x in metafunc._calls] assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"] def test_parametrize_with_wrong_number_of_ids(self, testdir): def func(x, y): pass metafunc = self.Metafunc(func) pytest.raises(ValueError, lambda: metafunc.parametrize("x", [1,2], ids=['basic'])) pytest.raises(ValueError, lambda: metafunc.parametrize(("x","y"), [("abc", "def"), ("ghi", 
"jkl")], ids=["one"])) def test_parametrize_with_userobjects(self): def func(x, y): pass metafunc = self.Metafunc(func) class A: pass metafunc.parametrize("x", [A(), A()]) metafunc.parametrize("y", list("ab")) assert metafunc._calls[0].id == "x0-a" assert metafunc._calls[1].id == "x0-b" assert metafunc._calls[2].id == "x1-a" assert metafunc._calls[3].id == "x1-b" @pytest.mark.issue250 def test_idmaker_autoname(self): from _pytest.python import idmaker result = idmaker(("a", "b"), [("string", 1.0), ("st-ring", 2.0)]) assert result == ["string-1.0", "st-ring-2.0"] result = idmaker(("a", "b"), [(object(), 1.0), (object(), object())]) assert result == ["a0-1.0", "a1-b1"] # unicode mixing, issue250 result = idmaker((py.builtin._totext("a"), "b"), [({}, '\xc3\xb4')]) assert result == ['a0-\xc3\xb4'] def test_idmaker_native_strings(self): from _pytest.python import idmaker result = idmaker(("a", "b"), [(1.0, -1.1), (2, -202), ("three", "three hundred"), (True, False), (None, None), (list("six"), [66, 66]), (set([7]), set("seven")), (tuple("eight"), (8, -8, 8)) ]) assert result == ["1.0--1.1", "2--202", "three-three hundred", "True-False", "None-None", "a5-b5", "a6-b6", "a7-b7"] def test_addcall_and_parametrize(self): def func(x, y): pass metafunc = self.Metafunc(func) metafunc.addcall({'x': 1}) metafunc.parametrize('y', [2,3]) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {'x': 1, 'y': 2} assert metafunc._calls[1].funcargs == {'x': 1, 'y': 3} assert metafunc._calls[0].id == "0-2" assert metafunc._calls[1].id == "0-3" def test_parametrize_indirect(self): def func(x, y): pass metafunc = self.Metafunc(func) metafunc.parametrize('x', [1], indirect=True) metafunc.parametrize('y', [2,3], indirect=True) metafunc.parametrize('unnamed', [1], indirect=True) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {} assert metafunc._calls[1].funcargs == {} assert metafunc._calls[0].params == dict(x=1,y=2, unnamed=1) assert 
metafunc._calls[1].params == dict(x=1,y=3, unnamed=1) def test_addcalls_and_parametrize_indirect(self): def func(x, y): pass metafunc = self.Metafunc(func) metafunc.addcall(param="123") metafunc.parametrize('x', [1], indirect=True) metafunc.parametrize('y', [2,3], indirect=True) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {} assert metafunc._calls[1].funcargs == {} assert metafunc._calls[0].params == dict(x=1,y=2) assert metafunc._calls[1].params == dict(x=1,y=3) def test_parametrize_functional(self, testdir): testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.parametrize('x', [1,2], indirect=True) metafunc.parametrize('y', [2]) def pytest_funcarg__x(request): return request.param * 10 #def pytest_funcarg__y(request): # return request.param def test_simple(x,y): assert x in (10,20) assert y == 2 """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines([ "*test_simple*1-2*", "*test_simple*2-2*", "*2 passed*", ]) def test_parametrize_onearg(self): metafunc = self.Metafunc(lambda x: None) metafunc.parametrize("x", [1,2]) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == dict(x=1) assert metafunc._calls[0].id == "1" assert metafunc._calls[1].funcargs == dict(x=2) assert metafunc._calls[1].id == "2" def test_parametrize_onearg_indirect(self): metafunc = self.Metafunc(lambda x: None) metafunc.parametrize("x", [1,2], indirect=True) assert metafunc._calls[0].params == dict(x=1) assert metafunc._calls[0].id == "1" assert metafunc._calls[1].params == dict(x=2) assert metafunc._calls[1].id == "2" def test_parametrize_twoargs(self): metafunc = self.Metafunc(lambda x,y: None) metafunc.parametrize(("x", "y"), [(1,2), (3,4)]) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == dict(x=1, y=2) assert metafunc._calls[0].id == "1-2" assert metafunc._calls[1].funcargs == dict(x=3, y=4) assert metafunc._calls[1].id == "3-4" def test_parametrize_multiple_times(self, testdir): 
testdir.makepyfile(""" import pytest pytestmark = pytest.mark.parametrize("x", [1,2]) def test_func(x): assert 0, x class TestClass: pytestmark = pytest.mark.parametrize("y", [3,4]) def test_meth(self, x, y): assert 0, x """) result = testdir.runpytest() assert result.ret == 1 result.stdout.fnmatch_lines([ "*6 fail*", ]) def test_parametrize_CSV(self, testdir): testdir.makepyfile(""" import pytest @pytest.mark.parametrize("x, y,", [(1,2), (2,3)]) def test_func(x, y): assert x+1 == y """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_parametrize_class_scenarios(self, testdir): testdir.makepyfile(""" # same as doc/en/example/parametrize scenario example def pytest_generate_tests(metafunc): idlist = [] argvalues = [] for scenario in metafunc.cls.scenarios: idlist.append(scenario[0]) items = scenario[1].items() argnames = [x[0] for x in items] argvalues.append(([x[1] for x in items])) metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class") class Test(object): scenarios = [['1', {'arg': {1: 2}, "arg2": "value2"}], ['2', {'arg':'value2', "arg2": "value2"}]] def test_1(self, arg, arg2): pass def test_2(self, arg2, arg): pass def test_3(self, arg, arg2): pass """) result = testdir.runpytest("-v") assert result.ret == 0 result.stdout.fnmatch_lines(""" *test_1*1* *test_2*1* *test_3*1* *test_1*2* *test_2*2* *test_3*2* *6 passed* """) class TestMetafuncFunctional: def test_attributes(self, testdir): p = testdir.makepyfile(""" # assumes that generate/provide runs in the same process import py, pytest def pytest_generate_tests(metafunc): metafunc.addcall(param=metafunc) def pytest_funcarg__metafunc(request): assert request._pyfuncitem._genid == "0" return request.param def test_function(metafunc, pytestconfig): assert metafunc.config == pytestconfig assert metafunc.module.__name__ == __name__ assert metafunc.function == test_function assert metafunc.cls is None class TestClass: def test_method(self, metafunc, pytestconfig): assert 
metafunc.config == pytestconfig assert metafunc.module.__name__ == __name__ if py.std.sys.version_info > (3, 0): unbound = TestClass.test_method else: unbound = TestClass.test_method.im_func # XXX actually have an unbound test function here? assert metafunc.function == unbound assert metafunc.cls == TestClass """) result = testdir.runpytest(p, "-v") result.stdout.fnmatch_lines([ "*2 passed in*", ]) def test_addcall_with_two_funcargs_generators(self, testdir): testdir.makeconftest(""" def pytest_generate_tests(metafunc): assert "arg1" in metafunc.fixturenames metafunc.addcall(funcargs=dict(arg1=1, arg2=2)) """) p = testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.addcall(funcargs=dict(arg1=1, arg2=1)) class TestClass: def test_myfunc(self, arg1, arg2): assert arg1 == arg2 """) result = testdir.runpytest("-v", p) result.stdout.fnmatch_lines([ "*test_myfunc*0*PASS*", "*test_myfunc*1*FAIL*", "*1 failed, 1 passed*" ]) def test_two_functions(self, testdir): p = testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.addcall(param=10) metafunc.addcall(param=20) def pytest_funcarg__arg1(request): return request.param def test_func1(arg1): assert arg1 == 10 def test_func2(arg1): assert arg1 in (10, 20) """) result = testdir.runpytest("-v", p) result.stdout.fnmatch_lines([ "*test_func1*0*PASS*", "*test_func1*1*FAIL*", "*test_func2*PASS*", "*1 failed, 3 passed*" ]) def test_noself_in_method(self, testdir): p = testdir.makepyfile(""" def pytest_generate_tests(metafunc): assert 'xyz' not in metafunc.fixturenames class TestHello: def test_hello(xyz): pass """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*1 pass*", ]) def test_generate_plugin_and_module(self, testdir): testdir.makeconftest(""" def pytest_generate_tests(metafunc): assert "arg1" in metafunc.fixturenames metafunc.addcall(id="world", param=(2,100)) """) p = testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.addcall(param=(1,1), id="hello") def 
pytest_funcarg__arg1(request): return request.param[0] def pytest_funcarg__arg2(request): return request.param[1] class TestClass: def test_myfunc(self, arg1, arg2): assert arg1 == arg2 """) result = testdir.runpytest("-v", p) result.stdout.fnmatch_lines([ "*test_myfunc*hello*PASS*", "*test_myfunc*world*FAIL*", "*1 failed, 1 passed*" ]) def test_generate_tests_in_class(self, testdir): p = testdir.makepyfile(""" class TestClass: def pytest_generate_tests(self, metafunc): metafunc.addcall(funcargs={'hello': 'world'}, id="hello") def test_myfunc(self, hello): assert hello == "world" """) result = testdir.runpytest("-v", p) result.stdout.fnmatch_lines([ "*test_myfunc*hello*PASS*", "*1 passed*" ]) def test_two_functions_not_same_instance(self, testdir): p = testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.addcall({'arg1': 10}) metafunc.addcall({'arg1': 20}) class TestClass: def test_func(self, arg1): assert not hasattr(self, 'x') self.x = 1 """) result = testdir.runpytest("-v", p) result.stdout.fnmatch_lines([ "*test_func*0*PASS*", "*test_func*1*PASS*", "*2 pass*", ]) def test_issue28_setup_method_in_generate_tests(self, testdir): p = testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.addcall({'arg1': 1}) class TestClass: def test_method(self, arg1): assert arg1 == self.val def setup_method(self, func): self.val = 1 """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*1 pass*", ]) def test_parametrize_functional2(self, testdir): testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.parametrize("arg1", [1,2]) metafunc.parametrize("arg2", [4,5]) def test_hello(arg1, arg2): assert 0, (arg1, arg2) """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*(1, 4)*", "*(1, 5)*", "*(2, 4)*", "*(2, 5)*", "*4 failed*", ]) def test_parametrize_and_inner_getfuncargvalue(self, testdir): p = testdir.makepyfile(""" def pytest_generate_tests(metafunc): metafunc.parametrize("arg1", [1], indirect=True) 
metafunc.parametrize("arg2", [10], indirect=True) def pytest_funcarg__arg1(request): x = request.getfuncargvalue("arg2") return x + request.param def pytest_funcarg__arg2(request): return request.param def test_func1(arg1, arg2): assert arg1 == 11 """) result = testdir.runpytest("-v", p) result.stdout.fnmatch_lines([ "*test_func1*1*PASS*", "*1 passed*" ]) def test_parametrize_on_setup_arg(self, testdir): p = testdir.makepyfile(""" def pytest_generate_tests(metafunc): assert "arg1" in metafunc.fixturenames metafunc.parametrize("arg1", [1], indirect=True) def pytest_funcarg__arg1(request): return request.param def pytest_funcarg__arg2(request, arg1): return 10 * arg1 def test_func(arg2): assert arg2 == 10 """) result = testdir.runpytest("-v", p) result.stdout.fnmatch_lines([ "*test_func*1*PASS*", "*1 passed*" ]) def test_parametrize_with_ids(self, testdir): testdir.makepyfile(""" import pytest def pytest_generate_tests(metafunc): metafunc.parametrize(("a", "b"), [(1,1), (1,2)], ids=["basic", "advanced"]) def test_function(a, b): assert a == b """) result = testdir.runpytest("-v") assert result.ret == 1 result.stdout.fnmatch_lines_random([ "*test_function*basic*PASSED", "*test_function*advanced*FAILED", ]) def test_parametrize_without_ids(self, testdir): testdir.makepyfile(""" import pytest def pytest_generate_tests(metafunc): metafunc.parametrize(("a", "b"), [(1,object()), (1.3,object())]) def test_function(a, b): assert 1 """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines(""" *test_function*1-b0* *test_function*1.3-b1* """) @pytest.mark.parametrize(("scope", "length"), [("module", 2), ("function", 4)]) def test_parametrize_scope_overrides(self, testdir, scope, length): testdir.makepyfile(""" import pytest l = [] def pytest_generate_tests(metafunc): if "arg" in metafunc.funcargnames: metafunc.parametrize("arg", [1,2], indirect=True, scope=%r) def pytest_funcarg__arg(request): l.append(request.param) return request.param def test_hello(arg): assert arg 
in (1,2) def test_world(arg): assert arg in (1,2) def test_checklength(): assert len(l) == %d """ % (scope, length)) reprec = testdir.inline_run() reprec.assertoutcome(passed=5) def test_parametrize_issue323(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(scope='module', params=range(966)) def foo(request): return request.param def test_it(foo): pass def test_it2(foo): pass """) reprec = testdir.inline_run("--collect-only") assert not reprec.getcalls("pytest_internalerror") def test_usefixtures_seen_in_generate_tests(self, testdir): testdir.makepyfile(""" import pytest def pytest_generate_tests(metafunc): assert "abc" in metafunc.fixturenames metafunc.parametrize("abc", [1]) @pytest.mark.usefixtures("abc") def test_function(): pass """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_generate_tests_only_done_in_subdir(self, testdir): sub1 = testdir.mkpydir("sub1") sub2 = testdir.mkpydir("sub2") sub1.join("conftest.py").write(py.code.Source(""" def pytest_generate_tests(metafunc): assert metafunc.function.__name__ == "test_1" """)) sub2.join("conftest.py").write(py.code.Source(""" def pytest_generate_tests(metafunc): assert metafunc.function.__name__ == "test_2" """)) sub1.join("test_in_sub1.py").write("def test_1(): pass") sub2.join("test_in_sub2.py").write("def test_2(): pass") result = testdir.runpytest("-v", "-s", sub1, sub2, sub1) result.stdout.fnmatch_lines([ "*3 passed*" ]) def test_generate_same_function_names_issue403(self, testdir): testdir.makepyfile(""" import pytest def make_tests(): @pytest.mark.parametrize("x", range(2)) def test_foo(x): pass return test_foo test_x = make_tests() test_y = make_tests() """) reprec = testdir.inline_run() reprec.assertoutcome(passed=4) class TestMarkersWithParametrization: pytestmark = pytest.mark.issue308 def test_simple_mark(self, testdir): s = """ import pytest @pytest.mark.foo @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.bar((1, 3)), (2, 3), ]) def 
test_increment(n, expected): assert n + 1 == expected """ items = testdir.getitems(s) assert len(items) == 3 for item in items: assert 'foo' in item.keywords assert 'bar' not in items[0].keywords assert 'bar' in items[1].keywords assert 'bar' not in items[2].keywords def test_select_based_on_mark(self, testdir): s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.foo((2, 3)), (3, 4), ]) def test_increment(n, expected): assert n + 1 == expected """ testdir.makepyfile(s) rec = testdir.inline_run("-m", 'foo') passed, skipped, fail = rec.listoutcomes() assert len(passed) == 1 assert len(skipped) == 0 assert len(fail) == 0 @pytest.mark.xfail(reason="is this important to support??") def test_nested_marks(self, testdir): s = """ import pytest mastermark = pytest.mark.foo(pytest.mark.bar) @pytest.mark.parametrize(("n", "expected"), [ (1, 2), mastermark((1, 3)), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected """ items = testdir.getitems(s) assert len(items) == 3 for mark in ['foo', 'bar']: assert mark not in items[0].keywords assert mark in items[1].keywords assert mark not in items[2].keywords def test_simple_xfail(self, testdir): s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail((1, 3)), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected """ testdir.makepyfile(s) reprec = testdir.inline_run() # xfail is skip?? 
reprec.assertoutcome(passed=2, skipped=1) def test_simple_xfail_single_argname(self, testdir): s = """ import pytest @pytest.mark.parametrize("n", [ 2, pytest.mark.xfail(3), 4, ]) def test_isEven(n): assert n % 2 == 0 """ testdir.makepyfile(s) reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=1) def test_xfail_with_arg(self, testdir): s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail("True")((1, 3)), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected """ testdir.makepyfile(s) reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=1) def test_xfail_with_kwarg(self, testdir): s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail(reason="some bug")((1, 3)), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected """ testdir.makepyfile(s) reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=1) def test_xfail_with_arg_and_kwarg(self, testdir): s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail("True", reason="some bug")((1, 3)), (2, 3), ]) def test_increment(n, expected): assert n + 1 == expected """ testdir.makepyfile(s) reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=1) def test_xfail_passing_is_xpass(self, testdir): s = """ import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)), (3, 4), ]) def test_increment(n, expected): assert n + 1 == expected """ testdir.makepyfile(s) reprec = testdir.inline_run() # xpass is fail, obviously :) reprec.assertoutcome(passed=2, failed=1) def test_parametrize_called_in_generate_tests(self, testdir): s = """ import pytest def pytest_generate_tests(metafunc): passingTestData = [(1, 2), (2, 3)] failingTestData = [(1, 3), (2, 2)] testData = passingTestData + [pytest.mark.xfail(d) for d in failingTestData] metafunc.parametrize(("n", 
"expected"), testData) def test_increment(n, expected): assert n + 1 == expected """ testdir.makepyfile(s) reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=2) @pytest.mark.issue290 def test_parametrize_ID_generation_string_int_works(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture def myfixture(): return 'example' @pytest.mark.parametrize( 'limit', (0, '0')) def test_limit(limit, myfixture): return """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) pytest-2.5.1/testing/test_argcomplete.py0000664000175000017500000000731312254002202020020 0ustar hpkhpk00000000000000from __future__ import with_statement import py, pytest # test for _argcomplete but not specific for any application def equal_with_bash(prefix, ffc, fc, out=None): res = ffc(prefix) res_bash = set(fc(prefix)) retval = set(res) == res_bash if out: out.write('equal_with_bash %s %s\n' % (retval, res)) if not retval: out.write(' python - bash: %s\n' % (set(res) - res_bash)) out.write(' bash - python: %s\n' % (res_bash - set(res))) return retval # copied from argcomplete.completers as import from there # also pulls in argcomplete.__init__ which opens filedescriptor 9 # this gives an IOError at the end of testrun def _wrapcall(*args, **kargs): try: if py.std.sys.version_info > (2,7): return py.std.subprocess.check_output(*args,**kargs).decode().splitlines() if 'stdout' in kargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = py.std.subprocess.Popen( stdout=py.std.subprocess.PIPE, *args, **kargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kargs.get("args") if cmd is None: cmd = args[0] raise py.std.subprocess.CalledProcessError(retcode, cmd) return output.decode().splitlines() except py.std.subprocess.CalledProcessError: return [] class FilesCompleter(object): 'File completer class, optionally takes a list of allowed extensions' def __init__(self,allowednames=(),directories=True): 
# Fix if someone passes in a string instead of a list if type(allowednames) is str: allowednames = [allowednames] self.allowednames = [x.lstrip('*').lstrip('.') for x in allowednames] self.directories = directories def __call__(self, prefix, **kwargs): completion = [] if self.allowednames: if self.directories: files = _wrapcall(['bash','-c', "compgen -A directory -- '{p}'".format(p=prefix)]) completion += [ f + '/' for f in files] for x in self.allowednames: completion += _wrapcall(['bash', '-c', "compgen -A file -X '!*.{0}' -- '{p}'".format(x,p=prefix)]) else: completion += _wrapcall(['bash', '-c', "compgen -A file -- '{p}'".format(p=prefix)]) anticomp = _wrapcall(['bash', '-c', "compgen -A directory -- '{p}'".format(p=prefix)]) completion = list( set(completion) - set(anticomp)) if self.directories: completion += [f + '/' for f in anticomp] return completion # the following barfs with a syntax error on py2.5 # @pytest.mark.skipif("sys.version_info < (2,6)") class TestArgComplete: @pytest.mark.skipif("sys.platform in ('win32', 'darwin')") @pytest.mark.skipif("sys.version_info < (2,6)") def test_compare_with_compgen(self): from _pytest._argcomplete import FastFilesCompleter ffc = FastFilesCompleter() fc = FilesCompleter() for x in '/ /d /data qqq'.split(): assert equal_with_bash(x, ffc, fc, out=py.std.sys.stdout) @pytest.mark.skipif("sys.platform in ('win32', 'darwin')") @pytest.mark.skipif("sys.version_info < (2,6)") def test_remove_dir_prefix(self): """this is not compatible with compgen but it is with bash itself: ls /usr/ """ from _pytest._argcomplete import FastFilesCompleter ffc = FastFilesCompleter() fc = FilesCompleter() for x in '/usr/'.split(): assert not equal_with_bash(x, ffc, fc, out=py.std.sys.stdout) pytest-2.5.1/testing/test_mark.py0000664000175000017500000004311512254002202016450 0ustar hpkhpk00000000000000import py, pytest from _pytest.mark import MarkGenerator as Mark class TestMark: def test_markinfo_repr(self): from _pytest.mark import MarkInfo 
m = MarkInfo("hello", (1,2), {}) repr(m) def test_pytest_exists_in_namespace_all(self): assert 'mark' in py.test.__all__ assert 'mark' in pytest.__all__ def test_pytest_mark_notcallable(self): mark = Mark() pytest.raises((AttributeError, TypeError), mark) def test_pytest_mark_bare(self): mark = Mark() def f(): pass mark.hello(f) assert f.hello def test_pytest_mark_keywords(self): mark = Mark() def f(): pass mark.world(x=3, y=4)(f) assert f.world assert f.world.kwargs['x'] == 3 assert f.world.kwargs['y'] == 4 def test_apply_multiple_and_merge(self): mark = Mark() def f(): pass mark.world mark.world(x=3)(f) assert f.world.kwargs['x'] == 3 mark.world(y=4)(f) assert f.world.kwargs['x'] == 3 assert f.world.kwargs['y'] == 4 mark.world(y=1)(f) assert f.world.kwargs['y'] == 1 assert len(f.world.args) == 0 def test_pytest_mark_positional(self): mark = Mark() def f(): pass mark.world("hello")(f) assert f.world.args[0] == "hello" mark.world("world")(f) def test_pytest_mark_reuse(self): mark = Mark() def f(): pass w = mark.some w("hello", reason="123")(f) assert f.some.args[0] == "hello" assert f.some.kwargs['reason'] == "123" def g(): pass w("world", reason2="456")(g) assert g.some.args[0] == "world" assert 'reason' not in g.some.kwargs assert g.some.kwargs['reason2'] == "456" def test_ini_markers(testdir): testdir.makeini(""" [pytest] markers = a1: this is a webtest marker a2: this is a smoke marker """) testdir.makepyfile(""" def test_markers(pytestconfig): markers = pytestconfig.getini("markers") print (markers) assert len(markers) >= 2 assert markers[0].startswith("a1:") assert markers[1].startswith("a2:") """) rec = testdir.inline_run() rec.assertoutcome(passed=1) def test_markers_option(testdir): testdir.makeini(""" [pytest] markers = a1: this is a webtest marker a1some: another marker """) result = testdir.runpytest("--markers", ) result.stdout.fnmatch_lines([ "*a1*this is a webtest*", "*a1some*another marker", ]) def test_mark_on_pseudo_function(testdir): 
testdir.makepyfile(""" import pytest @pytest.mark.r(lambda x: 0/0) def test_hello(): pass """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_strict_prohibits_unregistered_markers(testdir): testdir.makepyfile(""" import pytest @pytest.mark.unregisteredmark def test_hello(): pass """) result = testdir.runpytest("--strict") assert result.ret != 0 result.stdout.fnmatch_lines([ "*unregisteredmark*not*registered*", ]) @pytest.mark.parametrize("spec", [ ("xyz", ("test_one",)), ("xyz and xyz2", ()), ("xyz2", ("test_two",)), ("xyz or xyz2", ("test_one", "test_two"),) ]) def test_mark_option(spec, testdir): testdir.makepyfile(""" import pytest @pytest.mark.xyz def test_one(): pass @pytest.mark.xyz2 def test_two(): pass """) opt, passed_result = spec rec = testdir.inline_run("-m", opt) passed, skipped, fail = rec.listoutcomes() passed = [x.nodeid.split("::")[-1] for x in passed] assert len(passed) == len(passed_result) assert list(passed) == list(passed_result) @pytest.mark.parametrize("spec", [ ("interface", ("test_interface",)), ("not interface", ("test_nointer",)), ]) def test_mark_option_custom(spec, testdir): testdir.makeconftest(""" import pytest def pytest_collection_modifyitems(items): for item in items: if "interface" in item.nodeid: item.keywords["interface"] = pytest.mark.interface """) testdir.makepyfile(""" def test_interface(): pass def test_nointer(): pass """) opt, passed_result = spec rec = testdir.inline_run("-m", opt) passed, skipped, fail = rec.listoutcomes() passed = [x.nodeid.split("::")[-1] for x in passed] assert len(passed) == len(passed_result) assert list(passed) == list(passed_result) @pytest.mark.parametrize("spec", [ ("interface", ("test_interface",)), ("not interface", ("test_nointer", "test_pass")), ("pass", ("test_pass",)), ("not pass", ("test_interface", "test_nointer")), ]) def test_keyword_option_custom(spec, testdir): testdir.makepyfile(""" def test_interface(): pass def test_nointer(): pass def test_pass(): pass 
""") opt, passed_result = spec rec = testdir.inline_run("-k", opt) passed, skipped, fail = rec.listoutcomes() passed = [x.nodeid.split("::")[-1] for x in passed] assert len(passed) == len(passed_result) assert list(passed) == list(passed_result) @pytest.mark.parametrize("spec", [ ("None", ("test_func[None]",)), ("1.3", ("test_func[1.3]",)) ]) def test_keyword_option_parametrize(spec, testdir): testdir.makepyfile(""" import pytest @pytest.mark.parametrize("arg", [None, 1.3]) def test_func(arg): pass """) opt, passed_result = spec rec = testdir.inline_run("-k", opt) passed, skipped, fail = rec.listoutcomes() passed = [x.nodeid.split("::")[-1] for x in passed] assert len(passed) == len(passed_result) assert list(passed) == list(passed_result) class TestFunctional: def test_mark_per_function(self, testdir): p = testdir.makepyfile(""" import pytest @pytest.mark.hello def test_hello(): assert hasattr(test_hello, 'hello') """) result = testdir.runpytest(p) result.stdout.fnmatch_lines(["*1 passed*"]) def test_mark_per_module(self, testdir): item = testdir.getitem(""" import pytest pytestmark = pytest.mark.hello def test_func(): pass """) keywords = item.keywords assert 'hello' in keywords def test_marklist_per_class(self, testdir): item = testdir.getitem(""" import pytest class TestClass: pytestmark = [pytest.mark.hello, pytest.mark.world] def test_func(self): assert TestClass.test_func.hello assert TestClass.test_func.world """) keywords = item.keywords assert 'hello' in keywords def test_marklist_per_module(self, testdir): item = testdir.getitem(""" import pytest pytestmark = [pytest.mark.hello, pytest.mark.world] class TestClass: def test_func(self): assert TestClass.test_func.hello assert TestClass.test_func.world """) keywords = item.keywords assert 'hello' in keywords assert 'world' in keywords @pytest.mark.skipif("sys.version_info < (2,6)") def test_mark_per_class_decorator(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.hello class TestClass: 
def test_func(self): assert TestClass.test_func.hello """) keywords = item.keywords assert 'hello' in keywords @pytest.mark.skipif("sys.version_info < (2,6)") def test_mark_per_class_decorator_plus_existing_dec(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.hello class TestClass: pytestmark = pytest.mark.world def test_func(self): assert TestClass.test_func.hello assert TestClass.test_func.world """) keywords = item.keywords assert 'hello' in keywords assert 'world' in keywords def test_merging_markers(self, testdir): p = testdir.makepyfile(""" import pytest pytestmark = pytest.mark.hello("pos1", x=1, y=2) class TestClass: # classlevel overrides module level pytestmark = pytest.mark.hello(x=3) @pytest.mark.hello("pos0", z=4) def test_func(self): pass """) items, rec = testdir.inline_genitems(p) item, = items keywords = item.keywords marker = keywords['hello'] assert marker.args == ("pos0", "pos1") assert marker.kwargs == {'x': 1, 'y': 2, 'z': 4} # test the new __iter__ interface l = list(marker) assert len(l) == 3 assert l[0].args == ("pos0",) assert l[1].args == () assert l[2].args == ("pos1", ) @pytest.mark.xfail(reason='unfixed') def test_merging_markers_deep(self, testdir): # issue 199 - propagate markers into nested classes p = testdir.makepyfile(""" import pytest class TestA: pytestmark = pytest.mark.a def test_b(self): assert True class TestC: # this one didnt get marked def test_d(self): assert True """) items, rec = testdir.inline_genitems(p) for item in items: print (item, item.keywords) assert 'a' in item.keywords def test_mark_with_wrong_marker(self, testdir): reprec = testdir.inline_runsource(""" import pytest class pytestmark: pass def test_func(): pass """) l = reprec.getfailedcollections() assert len(l) == 1 assert "TypeError" in str(l[0].longrepr) def test_mark_dynamically_in_funcarg(self, testdir): testdir.makeconftest(""" import pytest def pytest_funcarg__arg(request): request.applymarker(pytest.mark.hello) def 
pytest_terminal_summary(terminalreporter): l = terminalreporter.stats['passed'] terminalreporter.writer.line("keyword: %s" % l[0].keywords) """) testdir.makepyfile(""" def test_func(arg): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "keyword: *hello*" ]) def test_merging_markers_two_functions(self, testdir): p = testdir.makepyfile(""" import pytest @pytest.mark.hello("pos1", z=4) @pytest.mark.hello("pos0", z=3) def test_func(): pass """) items, rec = testdir.inline_genitems(p) item, = items keywords = item.keywords marker = keywords['hello'] l = list(marker) assert len(l) == 2 assert l[0].args == ("pos0",) assert l[1].args == ("pos1",) def test_no_marker_match_on_unmarked_names(self, testdir): p = testdir.makepyfile(""" import pytest @pytest.mark.shouldmatch def test_marked(): assert 1 def test_unmarked(): assert 1 """) reprec = testdir.inline_run("-m", "test_unmarked", p) passed, skipped, failed = reprec.listoutcomes() assert len(passed) + len(skipped) + len(failed) == 0 dlist = reprec.getcalls("pytest_deselected") deselected_tests = dlist[0].items assert len(deselected_tests) == 2 def test_keywords_at_node_level(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(scope="session", autouse=True) def some(request): request.keywords["hello"] = 42 assert "world" not in request.keywords @pytest.fixture(scope="function", autouse=True) def funcsetup(request): assert "world" in request.keywords assert "hello" in request.keywords @pytest.mark.world def test_function(): pass """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_keyword_added_for_session(self, testdir): testdir.makeconftest(""" import pytest def pytest_collection_modifyitems(session): session.add_marker("mark1") session.add_marker(pytest.mark.mark2) session.add_marker(pytest.mark.mark3) pytest.raises(ValueError, lambda: session.add_marker(10)) """) testdir.makepyfile(""" def test_some(request): assert "mark1" in request.keywords assert "mark2" in 
request.keywords assert "mark3" in request.keywords assert 10 not in request.keywords marker = request.node.get_marker("mark1") assert marker.name == "mark1" assert marker.args == () assert marker.kwargs == {} """) reprec = testdir.inline_run("-m", "mark1") reprec.assertoutcome(passed=1) class TestKeywordSelection: def test_select_simple(self, testdir): file_test = testdir.makepyfile(""" def test_one(): assert 0 class TestClass(object): def test_method_one(self): assert 42 == 43 """) def check(keyword, name): reprec = testdir.inline_run("-s", "-k", keyword, file_test) passed, skipped, failed = reprec.listoutcomes() assert len(failed) == 1 assert failed[0].nodeid.split("::")[-1] == name assert len(reprec.getcalls('pytest_deselected')) == 1 for keyword in ['test_one', 'est_on']: check(keyword, 'test_one') check('TestClass and test', 'test_method_one') @pytest.mark.parametrize("keyword", [ 'xxx', 'xxx and test_2', 'TestClass', 'xxx and -test_1', 'TestClass and test_2', 'xxx and TestClass and test_2']) def test_select_extra_keywords(self, testdir, keyword): p = testdir.makepyfile(test_select=""" def test_1(): pass class TestClass: def test_2(self): pass """) testdir.makepyfile(conftest=""" def pytest_pycollect_makeitem(__multicall__, name): if name == "TestClass": item = __multicall__.execute() item.extra_keyword_matches.add("xxx") return item """) reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword) py.builtin.print_("keyword", repr(keyword)) passed, skipped, failed = reprec.listoutcomes() assert len(passed) == 1 assert passed[0].nodeid.endswith("test_2") dlist = reprec.getcalls("pytest_deselected") assert len(dlist) == 1 assert dlist[0].items[0].name == 'test_1' def test_select_starton(self, testdir): threepass = testdir.makepyfile(test_threepass=""" def test_one(): assert 1 def test_two(): assert 1 def test_three(): assert 1 """) reprec = testdir.inline_run("-k", "test_two:", threepass) passed, skipped, failed = reprec.listoutcomes() assert len(passed) == 2 
assert not failed dlist = reprec.getcalls("pytest_deselected") assert len(dlist) == 1 item = dlist[0].items[0] assert item.name == "test_one" def test_keyword_extra(self, testdir): p = testdir.makepyfile(""" def test_one(): assert 0 test_one.mykeyword = True """) reprec = testdir.inline_run("-k", "mykeyword", p) passed, skipped, failed = reprec.countoutcomes() assert failed == 1 @pytest.mark.xfail def test_keyword_extra_dash(self, testdir): p = testdir.makepyfile(""" def test_one(): assert 0 test_one.mykeyword = True """) # with argparse the argument to an option cannot # start with '-' reprec = testdir.inline_run("-k", "-mykeyword", p) passed, skipped, failed = reprec.countoutcomes() assert passed + skipped + failed == 0 def test_no_magic_values(self, testdir): """Make sure the tests do not match on magic values, no double underscored values, like '__dict__', and no instance values, like '()'. """ p = testdir.makepyfile(""" def test_one(): assert 1 """) def assert_test_is_not_selected(keyword): reprec = testdir.inline_run("-k", keyword, p) passed, skipped, failed = reprec.countoutcomes() dlist = reprec.getcalls("pytest_deselected") assert passed + skipped + failed == 0 deselected_tests = dlist[0].items assert len(deselected_tests) == 1 assert_test_is_not_selected("__") assert_test_is_not_selected("()") pytest-2.5.1/testing/test_monkeypatch.py0000664000175000017500000001634412254002202020044 0ustar hpkhpk00000000000000import os, sys import pytest from _pytest.monkeypatch import monkeypatch as MonkeyPatch def pytest_funcarg__mp(request): cwd = os.getcwd() sys_path = list(sys.path) def cleanup(): sys.path[:] = sys_path os.chdir(cwd) request.addfinalizer(cleanup) return MonkeyPatch() def test_setattr(): class A: x = 1 monkeypatch = MonkeyPatch() pytest.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)") monkeypatch.setattr(A, 'y', 2, raising=False) assert A.y == 2 monkeypatch.undo() assert not hasattr(A, 'y') monkeypatch = MonkeyPatch() 
monkeypatch.setattr(A, 'x', 2) assert A.x == 2 monkeypatch.setattr(A, 'x', 3) assert A.x == 3 monkeypatch.undo() assert A.x == 1 A.x = 5 monkeypatch.undo() # double-undo makes no modification assert A.x == 5 class TestSetattrWithImportPath: def test_string_expression(self, monkeypatch): monkeypatch.setattr("os.path.abspath", lambda x: "hello2") assert os.path.abspath("123") == "hello2" def test_string_expression_class(self, monkeypatch): monkeypatch.setattr("_pytest.config.Config", 42) import _pytest assert _pytest.config.Config == 42 def test_unicode_string(self, monkeypatch): monkeypatch.setattr("_pytest.config.Config", 42) import _pytest assert _pytest.config.Config == 42 monkeypatch.delattr("_pytest.config.Config") def test_wrong_target(self, monkeypatch): pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None)) def test_unknown_import(self, monkeypatch): pytest.raises(pytest.fail.Exception, lambda: monkeypatch.setattr("unkn123.classx", None)) def test_unknown_attr(self, monkeypatch): pytest.raises(pytest.fail.Exception, lambda: monkeypatch.setattr("os.path.qweqwe", None)) def test_delattr(self, monkeypatch): monkeypatch.delattr("os.path.abspath") assert not hasattr(os.path, "abspath") monkeypatch.undo() assert os.path.abspath def test_delattr(): class A: x = 1 monkeypatch = MonkeyPatch() monkeypatch.delattr(A, 'x') assert not hasattr(A, 'x') monkeypatch.undo() assert A.x == 1 monkeypatch = MonkeyPatch() monkeypatch.delattr(A, 'x') pytest.raises(AttributeError, "monkeypatch.delattr(A, 'y')") monkeypatch.delattr(A, 'y', raising=False) monkeypatch.setattr(A, 'x', 5, raising=False) assert A.x == 5 monkeypatch.undo() assert A.x == 1 def test_setitem(): d = {'x': 1} monkeypatch = MonkeyPatch() monkeypatch.setitem(d, 'x', 2) monkeypatch.setitem(d, 'y', 1700) monkeypatch.setitem(d, 'y', 1700) assert d['x'] == 2 assert d['y'] == 1700 monkeypatch.setitem(d, 'x', 3) assert d['x'] == 3 monkeypatch.undo() assert d['x'] == 1 assert 'y' not in d d['x'] = 5 
monkeypatch.undo() assert d['x'] == 5 def test_setitem_deleted_meanwhile(): d = {} monkeypatch = MonkeyPatch() monkeypatch.setitem(d, 'x', 2) del d['x'] monkeypatch.undo() assert not d @pytest.mark.parametrize("before", [True, False]) def test_setenv_deleted_meanwhile(before): key = "qwpeoip123" if before: os.environ[key] = "world" monkeypatch = MonkeyPatch() monkeypatch.setenv(key, 'hello') del os.environ[key] monkeypatch.undo() if before: assert os.environ[key] == "world" del os.environ[key] else: assert key not in os.environ def test_delitem(): d = {'x': 1} monkeypatch = MonkeyPatch() monkeypatch.delitem(d, 'x') assert 'x' not in d monkeypatch.delitem(d, 'y', raising=False) pytest.raises(KeyError, "monkeypatch.delitem(d, 'y')") assert not d monkeypatch.setitem(d, 'y', 1700) assert d['y'] == 1700 d['hello'] = 'world' monkeypatch.setitem(d, 'x', 1500) assert d['x'] == 1500 monkeypatch.undo() assert d == {'hello': 'world', 'x': 1} def test_setenv(): monkeypatch = MonkeyPatch() monkeypatch.setenv('XYZ123', 2) import os assert os.environ['XYZ123'] == "2" monkeypatch.undo() assert 'XYZ123' not in os.environ def test_delenv(): name = 'xyz1234' assert name not in os.environ monkeypatch = MonkeyPatch() pytest.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name) monkeypatch.delenv(name, raising=False) monkeypatch.undo() os.environ[name] = "1" try: monkeypatch = MonkeyPatch() monkeypatch.delenv(name) assert name not in os.environ monkeypatch.setenv(name, "3") assert os.environ[name] == "3" monkeypatch.undo() assert os.environ[name] == "1" finally: if name in os.environ: del os.environ[name] def test_setenv_prepend(): import os monkeypatch = MonkeyPatch() monkeypatch.setenv('XYZ123', 2, prepend="-") assert os.environ['XYZ123'] == "2" monkeypatch.setenv('XYZ123', 3, prepend="-") assert os.environ['XYZ123'] == "3-2" monkeypatch.undo() assert 'XYZ123' not in os.environ def test_monkeypatch_plugin(testdir): reprec = testdir.inline_runsource(""" def 
test_method(monkeypatch): assert monkeypatch.__class__.__name__ == "monkeypatch" """) res = reprec.countoutcomes() assert tuple(res) == (1, 0, 0), res def test_syspath_prepend(mp): old = list(sys.path) mp.syspath_prepend('world') mp.syspath_prepend('hello') assert sys.path[0] == "hello" assert sys.path[1] == "world" mp.undo() assert sys.path == old mp.undo() assert sys.path == old def test_syspath_prepend_double_undo(mp): mp.syspath_prepend('hello world') mp.undo() sys.path.append('more hello world') mp.undo() assert sys.path[-1] == 'more hello world' def test_chdir_with_path_local(mp, tmpdir): mp.chdir(tmpdir) assert os.getcwd() == tmpdir.strpath def test_chdir_with_str(mp, tmpdir): mp.chdir(tmpdir.strpath) assert os.getcwd() == tmpdir.strpath def test_chdir_undo(mp, tmpdir): cwd = os.getcwd() mp.chdir(tmpdir) mp.undo() assert os.getcwd() == cwd def test_chdir_double_undo(mp, tmpdir): mp.chdir(tmpdir.strpath) mp.undo() tmpdir.chdir() mp.undo() assert os.getcwd() == tmpdir.strpath def test_issue185_time_breaks(testdir): testdir.makepyfile(""" import time def test_m(monkeypatch): def f(): raise Exception monkeypatch.setattr(time, "time", f) """) result = testdir.runpytest() result.stdout.fnmatch_lines(""" *1 passed* """) class SampleNew(object): @staticmethod def hello(): return True class SampleNewInherit(SampleNew): pass class SampleOld: #oldstyle on python2 @staticmethod def hello(): return True class SampleOldInherit(SampleOld): pass @pytest.mark.parametrize('Sample', [ SampleNew, SampleNewInherit, SampleOld, SampleOldInherit, ], ids=['new', 'new-inherit', 'old', 'old-inherit']) def test_issue156_undo_staticmethod(Sample): monkeypatch = MonkeyPatch() monkeypatch.setattr(Sample, 'hello', None) assert Sample.hello is None monkeypatch.undo() assert Sample.hello() pytest-2.5.1/testing/test_core.py0000664000175000017500000005236712254002202016457 0ustar hpkhpk00000000000000import pytest, py, os from _pytest.core import * # noqa from _pytest.config import 
get_plugin_manager class TestBootstrapping: def test_consider_env_fails_to_import(self, monkeypatch): pluginmanager = PluginManager() monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",") pytest.raises(ImportError, lambda: pluginmanager.consider_env()) def test_preparse_args(self): pluginmanager = PluginManager() pytest.raises(ImportError, lambda: pluginmanager.consider_preparse(["xyz", "-p", "hello123"])) def test_plugin_prevent_register(self): pluginmanager = PluginManager() pluginmanager.consider_preparse(["xyz", "-p", "no:abc"]) l1 = pluginmanager.getplugins() pluginmanager.register(42, name="abc") l2 = pluginmanager.getplugins() assert len(l2) == len(l1) def test_plugin_prevent_register_unregistered_alredy_registered(self): pluginmanager = PluginManager() pluginmanager.register(42, name="abc") l1 = pluginmanager.getplugins() assert 42 in l1 pluginmanager.consider_preparse(["xyz", "-p", "no:abc"]) l2 = pluginmanager.getplugins() assert 42 not in l2 def test_plugin_double_register(self): pm = PluginManager() pm.register(42, name="abc") pytest.raises(ValueError, lambda: pm.register(42, name="abc")) def test_plugin_skip(self, testdir, monkeypatch): p = testdir.makepyfile(skipping1=""" import pytest pytest.skip("hello") """) p.copy(p.dirpath("skipping2.py")) monkeypatch.setenv("PYTEST_PLUGINS", "skipping2") result = testdir.runpytest("-p", "skipping1", "--traceconfig") assert result.ret == 0 result.stdout.fnmatch_lines([ "*hint*skipping1*hello*", "*hint*skipping2*hello*", ]) def test_consider_env_plugin_instantiation(self, testdir, monkeypatch): pluginmanager = PluginManager() testdir.syspathinsert() testdir.makepyfile(xy123="#") monkeypatch.setitem(os.environ, 'PYTEST_PLUGINS', 'xy123') l1 = len(pluginmanager.getplugins()) pluginmanager.consider_env() l2 = len(pluginmanager.getplugins()) assert l2 == l1 + 1 assert pluginmanager.getplugin('xy123') pluginmanager.consider_env() l3 = len(pluginmanager.getplugins()) assert l2 == l3 def 
test_consider_setuptools_instantiation(self, monkeypatch): pkg_resources = py.test.importorskip("pkg_resources") def my_iter(name): assert name == "pytest11" class EntryPoint: name = "pytest_mytestplugin" dist = None def load(self): class PseudoPlugin: x = 42 return PseudoPlugin() return iter([EntryPoint()]) monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) pluginmanager = PluginManager() pluginmanager.consider_setuptools_entrypoints() plugin = pluginmanager.getplugin("mytestplugin") assert plugin.x == 42 def test_consider_setuptools_not_installed(self, monkeypatch): monkeypatch.setitem(py.std.sys.modules, 'pkg_resources', py.std.types.ModuleType("pkg_resources")) pluginmanager = PluginManager() pluginmanager.consider_setuptools_entrypoints() # ok, we did not explode def test_pluginmanager_ENV_startup(self, testdir, monkeypatch): testdir.makepyfile(pytest_x500="#") p = testdir.makepyfile(""" import pytest def test_hello(pytestconfig): plugin = pytestconfig.pluginmanager.getplugin('pytest_x500') assert plugin is not None """) monkeypatch.setenv('PYTEST_PLUGINS', 'pytest_x500', prepend=",") result = testdir.runpytest(p) assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed in*"]) def test_import_plugin_importname(self, testdir): pluginmanager = PluginManager() pytest.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")') pytest.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwx.y")') testdir.syspathinsert() pluginname = "pytest_hello" testdir.makepyfile(**{pluginname: ""}) pluginmanager.import_plugin("pytest_hello") len1 = len(pluginmanager.getplugins()) pluginmanager.import_plugin("pytest_hello") len2 = len(pluginmanager.getplugins()) assert len1 == len2 plugin1 = pluginmanager.getplugin("pytest_hello") assert plugin1.__name__.endswith('pytest_hello') plugin2 = pluginmanager.getplugin("pytest_hello") assert plugin2 is plugin1 def test_import_plugin_dotted_name(self, testdir): pluginmanager = PluginManager() 
pytest.raises(ImportError, 'pluginmanager.import_plugin("qweqwex.y")') pytest.raises(ImportError, 'pluginmanager.import_plugin("pytest_qweqwex.y")') testdir.syspathinsert() testdir.mkpydir("pkg").join("plug.py").write("x=3") pluginname = "pkg.plug" pluginmanager.import_plugin(pluginname) mod = pluginmanager.getplugin("pkg.plug") assert mod.x == 3 def test_consider_module(self, testdir): pluginmanager = PluginManager() testdir.syspathinsert() testdir.makepyfile(pytest_p1="#") testdir.makepyfile(pytest_p2="#") mod = py.std.types.ModuleType("temp") mod.pytest_plugins = ["pytest_p1", "pytest_p2"] pluginmanager.consider_module(mod) assert pluginmanager.getplugin("pytest_p1").__name__ == "pytest_p1" assert pluginmanager.getplugin("pytest_p2").__name__ == "pytest_p2" def test_consider_module_import_module(self, testdir): mod = py.std.types.ModuleType("x") mod.pytest_plugins = "pytest_a" aplugin = testdir.makepyfile(pytest_a="#") pluginmanager = get_plugin_manager() reprec = testdir.getreportrecorder(pluginmanager) #syspath.prepend(aplugin.dirpath()) py.std.sys.path.insert(0, str(aplugin.dirpath())) pluginmanager.consider_module(mod) call = reprec.getcall(pluginmanager.hook.pytest_plugin_registered.name) assert call.plugin.__name__ == "pytest_a" # check that it is not registered twice pluginmanager.consider_module(mod) l = reprec.getcalls("pytest_plugin_registered") assert len(l) == 1 def test_config_sets_conftesthandle_onimport(self, testdir): config = testdir.parseconfig([]) assert config._conftest._onimport == config._onimportconftest def test_consider_conftest_deps(self, testdir): mod = testdir.makepyfile("pytest_plugins='xyz'").pyimport() pp = PluginManager() pytest.raises(ImportError, lambda: pp.consider_conftest(mod)) def test_pm(self): pp = PluginManager() class A: pass a1, a2 = A(), A() pp.register(a1) assert pp.isregistered(a1) pp.register(a2, "hello") assert pp.isregistered(a2) l = pp.getplugins() assert a1 in l assert a2 in l assert pp.getplugin('hello') == a2 
pp.unregister(a1) assert not pp.isregistered(a1) pp.unregister(name="hello") assert not pp.isregistered(a2) def test_pm_ordering(self): pp = PluginManager() class A: pass a1, a2 = A(), A() pp.register(a1) pp.register(a2, "hello") l = pp.getplugins() assert l.index(a1) < l.index(a2) a3 = A() pp.register(a3, prepend=True) l = pp.getplugins() assert l.index(a3) == 0 def test_register_imported_modules(self): pp = PluginManager() mod = py.std.types.ModuleType("x.y.pytest_hello") pp.register(mod) assert pp.isregistered(mod) l = pp.getplugins() assert mod in l pytest.raises(ValueError, "pp.register(mod)") pytest.raises(ValueError, lambda: pp.register(mod)) #assert not pp.isregistered(mod2) assert pp.getplugins() == l def test_canonical_import(self, monkeypatch): mod = py.std.types.ModuleType("pytest_xyz") monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod) pp = PluginManager() pp.import_plugin('pytest_xyz') assert pp.getplugin('pytest_xyz') == mod assert pp.isregistered(mod) def test_register_mismatch_method(self): pp = get_plugin_manager() class hello: def pytest_gurgel(self): pass pytest.raises(Exception, lambda: pp.register(hello())) def test_register_mismatch_arg(self): pp = get_plugin_manager() class hello: def pytest_configure(self, asd): pass pytest.raises(Exception, lambda: pp.register(hello())) def test_register(self): pm = get_plugin_manager() class MyPlugin: pass my = MyPlugin() pm.register(my) assert pm.getplugins() my2 = MyPlugin() pm.register(my2) assert pm.getplugins()[-2:] == [my, my2] assert pm.isregistered(my) assert pm.isregistered(my2) pm.unregister(my) assert not pm.isregistered(my) assert pm.getplugins()[-1:] == [my2] def test_listattr(self): plugins = PluginManager() class api1: x = 41 class api2: x = 42 class api3: x = 43 plugins.register(api1()) plugins.register(api2()) plugins.register(api3()) l = list(plugins.listattr('x')) assert l == [41, 42, 43] def test_hook_tracing(self): pm = get_plugin_manager() saveindent = [] class api1: x = 41 
def pytest_plugin_registered(self, plugin): saveindent.append(pm.trace.root.indent) raise ValueError(42) l = [] pm.trace.root.setwriter(l.append) indent = pm.trace.root.indent p = api1() pm.register(p) assert pm.trace.root.indent == indent assert len(l) == 1 assert 'pytest_plugin_registered' in l[0] pytest.raises(ValueError, lambda: pm.register(api1())) assert pm.trace.root.indent == indent assert saveindent[0] > indent class TestPytestPluginInteractions: def test_addhooks_conftestplugin(self, testdir): testdir.makepyfile(newhooks=""" def pytest_myhook(xyz): "new hook" """) conf = testdir.makeconftest(""" import sys ; sys.path.insert(0, '.') import newhooks def pytest_addhooks(pluginmanager): pluginmanager.addhooks(newhooks) def pytest_myhook(xyz): return xyz + 1 """) config = get_plugin_manager().config config._conftest.importconftest(conf) print(config.pluginmanager.getplugins()) res = config.hook.pytest_myhook(xyz=10) assert res == [11] def test_addhooks_nohooks(self, testdir): testdir.makeconftest(""" import sys def pytest_addhooks(pluginmanager): pluginmanager.addhooks(sys) """) res = testdir.runpytest() assert res.ret != 0 res.stderr.fnmatch_lines([ "*did not find*sys*" ]) def test_namespace_early_from_import(self, testdir): p = testdir.makepyfile(""" from pytest import Item from pytest import Item as Item2 assert Item is Item2 """) result = testdir.runpython(p) assert result.ret == 0 def test_do_ext_namespace(self, testdir): testdir.makeconftest(""" def pytest_namespace(): return {'hello': 'world'} """) p = testdir.makepyfile(""" from py.test import hello import py def test_hello(): assert hello == "world" assert 'hello' in py.test.__all__ """) reprec = testdir.inline_run(p) reprec.assertoutcome(passed=1) def test_do_option_postinitialize(self, testdir): config = testdir.parseconfigure() assert not hasattr(config.option, 'test123') p = testdir.makepyfile(""" def pytest_addoption(parser): parser.addoption('--test123', action="store_true", default=True) """) 
config._conftest.importconftest(p) assert config.option.test123 def test_configure(self, testdir): config = testdir.parseconfig() l = [] class A: def pytest_configure(self, config): l.append(self) config.pluginmanager.register(A()) assert len(l) == 0 config.do_configure() assert len(l) == 1 config.pluginmanager.register(A()) # leads to a configured() plugin assert len(l) == 2 assert l[0] != l[1] config.do_unconfigure() config.pluginmanager.register(A()) assert len(l) == 2 # lower level API def test_listattr(self): pluginmanager = PluginManager() class My2: x = 42 pluginmanager.register(My2()) assert not pluginmanager.listattr("hello") assert pluginmanager.listattr("x") == [42] def test_listattr_tryfirst(self): class P1: @pytest.mark.tryfirst def m(self): return 17 class P2: def m(self): return 23 class P3: def m(self): return 19 pluginmanager = PluginManager() p1 = P1() p2 = P2() p3 = P3() pluginmanager.register(p1) pluginmanager.register(p2) pluginmanager.register(p3) methods = pluginmanager.listattr('m') assert methods == [p2.m, p3.m, p1.m] # listattr keeps a cache and deleting # a function attribute requires clearing it pluginmanager._listattrcache.clear() del P1.m.__dict__['tryfirst'] pytest.mark.trylast(getattr(P2.m, 'im_func', P2.m)) methods = pluginmanager.listattr('m') assert methods == [p2.m, p1.m, p3.m] def test_namespace_has_default_and_env_plugins(testdir): p = testdir.makepyfile(""" import pytest pytest.mark """) result = testdir.runpython(p) assert result.ret == 0 def test_varnames(): def f(x): i = 3 # noqa class A: def f(self, y): pass class B(object): def __call__(self, z): pass assert varnames(f) == ("x",) assert varnames(A().f) == ('y',) assert varnames(B()) == ('z',) def test_varnames_class(): class C: def __init__(self, x): pass class D: pass assert varnames(C) == ("x",) assert varnames(D) == () class TestMultiCall: def test_uses_copy_of_methods(self): l = [lambda: 42] mc = MultiCall(l, {}) repr(mc) l[:] = [] res = mc.execute() return res == 42 
def test_call_passing(self): class P1: def m(self, __multicall__, x): assert len(__multicall__.results) == 1 assert not __multicall__.methods return 17 class P2: def m(self, __multicall__, x): assert __multicall__.results == [] assert __multicall__.methods return 23 p1 = P1() p2 = P2() multicall = MultiCall([p1.m, p2.m], {'x': 23}) assert "23" in repr(multicall) reslist = multicall.execute() assert len(reslist) == 2 # ensure reversed order assert reslist == [23, 17] def test_keyword_args(self): def f(x): return x + 1 class A: def f(self, x, y): return x + y multicall = MultiCall([f, A().f], dict(x=23, y=24)) assert "'x': 23" in repr(multicall) assert "'y': 24" in repr(multicall) reslist = multicall.execute() assert reslist == [24+23, 24] assert "2 results" in repr(multicall) def test_keyword_args_with_defaultargs(self): def f(x, z=1): return x + z reslist = MultiCall([f], dict(x=23, y=24)).execute() assert reslist == [24] reslist = MultiCall([f], dict(x=23, z=2)).execute() assert reslist == [25] def test_tags_call_error(self): multicall = MultiCall([lambda x: x], {}) pytest.raises(TypeError, multicall.execute) def test_call_subexecute(self): def m(__multicall__): subresult = __multicall__.execute() return subresult + 1 def n(): return 1 call = MultiCall([n, m], {}, firstresult=True) res = call.execute() assert res == 2 def test_call_none_is_no_result(self): def m1(): return 1 def m2(): return None res = MultiCall([m1, m2], {}, firstresult=True).execute() assert res == 1 res = MultiCall([m1, m2], {}).execute() assert res == [1] class TestHookRelay: def test_happypath(self): pm = PluginManager() class Api: def hello(self, arg): "api hook 1" mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he") assert hasattr(mcm, 'hello') assert repr(mcm.hello).find("hello") != -1 class Plugin: def hello(self, arg): return arg + 1 pm.register(Plugin()) l = mcm.hello(arg=3) assert l == [4] assert not hasattr(mcm, 'world') def test_only_kwargs(self): pm = PluginManager() class Api: def 
hello(self, arg): "api hook 1" mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he") pytest.raises(TypeError, lambda: mcm.hello(3)) def test_firstresult_definition(self): pm = PluginManager() class Api: def hello(self, arg): "api hook 1" hello.firstresult = True mcm = HookRelay(hookspecs=Api, pm=pm, prefix="he") class Plugin: def hello(self, arg): return arg + 1 pm.register(Plugin()) res = mcm.hello(arg=3) assert res == 4 class TestTracer: def test_simple(self): from _pytest.core import TagTracer rootlogger = TagTracer() log = rootlogger.get("pytest") log("hello") l = [] rootlogger.setwriter(l.append) log("world") assert len(l) == 1 assert l[0] == "world [pytest]\n" sublog = log.get("collection") sublog("hello") assert l[1] == "hello [pytest:collection]\n" def test_indent(self): from _pytest.core import TagTracer rootlogger = TagTracer() log = rootlogger.get("1") l = [] log.root.setwriter(lambda arg: l.append(arg)) log("hello") log.root.indent += 1 log("line1") log("line2") log.root.indent += 1 log("line3") log("line4") log.root.indent -= 1 log("line5") log.root.indent -= 1 log("last") assert len(l) == 7 names = [x[:x.rfind(' [')] for x in l] assert names == ['hello', ' line1', ' line2', ' line3', ' line4', ' line5', 'last'] def test_readable_output_dictargs(self): from _pytest.core import TagTracer rootlogger = TagTracer() out = rootlogger.format_message(['test'], [1]) assert out == ['1 [test]\n'] out2= rootlogger.format_message(['test'], ['test', {'a':1}]) assert out2 ==[ 'test [test]\n', ' a: 1\n' ] def test_setprocessor(self): from _pytest.core import TagTracer rootlogger = TagTracer() log = rootlogger.get("1") log2 = log.get("2") assert log2.tags == tuple("12") l = [] rootlogger.setprocessor(tuple("12"), lambda *args: l.append(args)) log("not seen") log2("seen") assert len(l) == 1 tags, args = l[0] assert "1" in tags assert "2" in tags assert args == ("seen",) l2 = [] rootlogger.setprocessor("1:2", lambda *args: l2.append(args)) log2("seen") tags, args = l2[0] 
assert args == ("seen",) def test_setmyprocessor(self): from _pytest.core import TagTracer rootlogger = TagTracer() log = rootlogger.get("1") log2 = log.get("2") l = [] log2.setmyprocessor(lambda *args: l.append(args)) log("not seen") assert not l log2(42) assert len(l) == 1 tags, args = l[0] assert "1" in tags assert "2" in tags assert args == (42,) def test_default_markers(testdir): result = testdir.runpytest("--markers") result.stdout.fnmatch_lines([ "*tryfirst*first*", "*trylast*last*", ]) def test_importplugin_issue375(testdir): testdir.makepyfile(qwe="import aaaa") excinfo = pytest.raises(ImportError, lambda: importplugin("qwe")) assert "qwe" not in str(excinfo.value) assert "aaaa" in str(excinfo.value) pytest-2.5.1/testing/test_assertrewrite.py0000664000175000017500000004153112254002202020421 0ustar hpkhpk00000000000000import os import stat import sys import zipfile import py import pytest ast = pytest.importorskip("ast") if sys.platform.startswith("java"): # XXX should be xfail pytest.skip("assert rewrite does currently not work on jython") from _pytest.assertion import util from _pytest.assertion.rewrite import rewrite_asserts, PYTEST_TAG def setup_module(mod): mod._old_reprcompare = util._reprcompare py.code._reprcompare = None def teardown_module(mod): util._reprcompare = mod._old_reprcompare del mod._old_reprcompare def rewrite(src): tree = ast.parse(src) rewrite_asserts(tree) return tree def getmsg(f, extra_ns=None, must_pass=False): """Rewrite the assertions in f, run it, and get the failure message.""" src = '\n'.join(py.code.Code(f).source().lines) mod = rewrite(src) code = compile(mod, "", "exec") ns = {} if extra_ns is not None: ns.update(extra_ns) py.builtin.exec_(code, ns) func = ns[f.__name__] try: func() except AssertionError: if must_pass: pytest.fail("shouldn't have raised") s = str(sys.exc_info()[1]) if not s.startswith("assert"): return "AssertionError: " + s return s else: if not must_pass: pytest.fail("function didn't raise at all") 
class TestAssertionRewrite:
    """Inspect the AST emitted by the assertion-rewriting pass and the
    failure messages it produces for simple name asserts."""

    def test_place_initial_imports(self):
        # Plain docstring module: the docstring stays at index 0 and the
        # two rewriter imports land directly behind it on the next line.
        mod = rewrite("""'Doc string'\nother = stuff""")
        assert isinstance(mod.body[0], ast.Expr)
        assert isinstance(mod.body[0].value, ast.Str)
        for node in mod.body[1:3]:
            assert isinstance(node, ast.Import)
            assert node.lineno == 2
            assert node.col_offset == 0
        assert isinstance(mod.body[3], ast.Assign)

        # A __future__ import comes first; rewriter imports follow it.
        mod = rewrite("""from __future__ import with_statement\nother_stuff""")
        assert isinstance(mod.body[0], ast.ImportFrom)
        for node in mod.body[1:3]:
            assert isinstance(node, ast.Import)
            assert node.lineno == 2
            assert node.col_offset == 0
        assert isinstance(mod.body[3], ast.Expr)

        # Docstring followed by a __future__ import: imports go third.
        mod = rewrite("""'doc string'\nfrom __future__ import with_statement\nother""")
        assert isinstance(mod.body[0], ast.Expr)
        assert isinstance(mod.body[0].value, ast.Str)
        assert isinstance(mod.body[1], ast.ImportFrom)
        for node in mod.body[2:4]:
            assert isinstance(node, ast.Import)
            assert node.lineno == 3
            assert node.col_offset == 0
        assert isinstance(mod.body[4], ast.Expr)

        # An ordinary (relative) import does not push the rewriter
        # imports back: they are inserted at the very top.
        mod = rewrite("""from . import relative\nother_stuff""")
        for node in mod.body[0:2]:
            assert isinstance(node, ast.Import)
            assert node.lineno == 1
            assert node.col_offset == 0
        assert isinstance(mod.body[3], ast.Expr)

    def test_dont_rewrite(self):
        # The PYTEST_DONT_REWRITE docstring opts the module out: the
        # assert survives untouched and no message is attached.
        mod = rewrite("""'PYTEST_DONT_REWRITE'\nassert 14""")
        assert len(mod.body) == 2
        assert isinstance(mod.body[0].value, ast.Str)
        assert isinstance(mod.body[1], ast.Assert)
        assert mod.body[1].msg is None

    def test_name(self):
        # A bare False name is reported by value.
        def fn():
            assert False
        assert getmsg(fn) == "assert False"

        # A local shadowing the function name still reports its value.
        def fn():
            fn = False
            assert fn
        assert getmsg(fn) == "assert False"

        # Globals supplied through the extra namespace behave the same.
        def fn():
            assert a_global  # noqa
        assert getmsg(fn, {"a_global": False}) == "assert False"

        # Module-valued names keep their name in the message.
        def fn():
            assert sys == 42
        assert getmsg(fn, {"sys": sys}) == "assert sys == 42"

        # So do class-valued names.
        def fn():
            assert cls == 42  # noqa
        class X(object):
            pass
        assert getmsg(fn, {"cls": X}) == "assert cls == 42"

    def test_assert_already_has_message(self):
        # An explicit assert message wins over the rewritten expression.
        def fn():
            assert False, "something bad!"
        assert getmsg(fn) == "AssertionError: something bad!"
def test_boolop(self): def f(): f = g = False assert f and g assert getmsg(f) == "assert (False)" def f(): f = True g = False assert f and g assert getmsg(f) == "assert (True and False)" def f(): f = False g = True assert f and g assert getmsg(f) == "assert (False)" def f(): f = g = False assert f or g assert getmsg(f) == "assert (False or False)" def f(): f = g = False assert not f and not g getmsg(f, must_pass=True) def x(): return False def f(): assert x() and x() assert getmsg(f, {"x" : x}) == "assert (x())" def f(): assert False or x() assert getmsg(f, {"x" : x}) == "assert (False or x())" def f(): assert 1 in {} and 2 in {} assert getmsg(f) == "assert (1 in {})" def f(): x = 1 y = 2 assert x in {1 : None} and y in {} assert getmsg(f) == "assert (1 in {1: None} and 2 in {})" def f(): f = True g = False assert f or g getmsg(f, must_pass=True) def f(): f = g = h = lambda: True assert f() and g() and h() getmsg(f, must_pass=True) def test_short_circut_evaluation(self): def f(): assert True or explode # noqa getmsg(f, must_pass=True) def f(): x = 1 assert x == 1 or x == 2 getmsg(f, must_pass=True) def test_unary_op(self): def f(): x = True assert not x assert getmsg(f) == "assert not True" def f(): x = 0 assert ~x + 1 assert getmsg(f) == "assert (~0 + 1)" def f(): x = 3 assert -x + x assert getmsg(f) == "assert (-3 + 3)" def f(): x = 0 assert +x + x assert getmsg(f) == "assert (+0 + 0)" def test_binary_op(self): def f(): x = 1 y = -1 assert x + y assert getmsg(f) == "assert (1 + -1)" def f(): assert not 5 % 4 assert getmsg(f) == "assert not (5 % 4)" def test_call(self): def g(a=42, *args, **kwargs): return False ns = {"g" : g} def f(): assert g() assert getmsg(f, ns) == """assert g()""" def f(): assert g(1) assert getmsg(f, ns) == """assert g(1)""" def f(): assert g(1, 2) assert getmsg(f, ns) == """assert g(1, 2)""" def f(): assert g(1, g=42) assert getmsg(f, ns) == """assert g(1, g=42)""" def f(): assert g(1, 3, g=23) assert getmsg(f, ns) == """assert g(1, 3, 
g=23)""" def f(): seq = [1, 2, 3] assert g(*seq) assert getmsg(f, ns) == """assert g(*[1, 2, 3])""" def f(): x = "a" assert g(**{x : 2}) assert getmsg(f, ns) == """assert g(**{'a': 2})""" def test_attribute(self): class X(object): g = 3 ns = {"x" : X} def f(): assert not x.g # noqa assert getmsg(f, ns) == """assert not 3 + where 3 = x.g""" def f(): x.a = False # noqa assert x.a # noqa assert getmsg(f, ns) == """assert x.a""" def test_comparisons(self): def f(): a, b = range(2) assert b < a assert getmsg(f) == """assert 1 < 0""" def f(): a, b, c = range(3) assert a > b > c assert getmsg(f) == """assert 0 > 1""" def f(): a, b, c = range(3) assert a < b > c assert getmsg(f) == """assert 1 > 2""" def f(): a, b, c = range(3) assert a < b <= c getmsg(f, must_pass=True) def f(): a, b, c = range(3) assert a < b assert b < c getmsg(f, must_pass=True) def test_len(self): def f(): l = list(range(10)) assert len(l) == 11 assert getmsg(f).startswith("""assert 10 == 11 + where 10 = len([""") def test_custom_reprcompare(self, monkeypatch): def my_reprcompare(op, left, right): return "42" monkeypatch.setattr(util, "_reprcompare", my_reprcompare) def f(): assert 42 < 3 assert getmsg(f) == "assert 42" def my_reprcompare(op, left, right): return "%s %s %s" % (left, op, right) monkeypatch.setattr(util, "_reprcompare", my_reprcompare) def f(): assert 1 < 3 < 5 <= 4 < 7 assert getmsg(f) == "assert 5 <= 4" def test_assert_raising_nonzero_in_comparison(self): def f(): class A(object): def __nonzero__(self): raise ValueError(42) def __lt__(self, other): return A() def __repr__(self): return "" def myany(x): return False assert myany(A() < 0) assert " < 0" in getmsg(f) def test_formatchar(self): def f(): assert "%test" == "test" assert getmsg(f).startswith("assert '%test' == 'test'") class TestRewriteOnImport: def test_pycache_is_a_file(self, testdir): testdir.tmpdir.join("__pycache__").write("Hello") testdir.makepyfile(""" def test_rewritten(): assert "@py_builtins" in globals()""") assert 
testdir.runpytest().ret == 0 def test_pycache_is_readonly(self, testdir): cache = testdir.tmpdir.mkdir("__pycache__") old_mode = cache.stat().mode cache.chmod(old_mode ^ stat.S_IWRITE) testdir.makepyfile(""" def test_rewritten(): assert "@py_builtins" in globals()""") try: assert testdir.runpytest().ret == 0 finally: cache.chmod(old_mode) def test_zipfile(self, testdir): z = testdir.tmpdir.join("myzip.zip") z_fn = str(z) f = zipfile.ZipFile(z_fn, "w") try: f.writestr("test_gum/__init__.py", "") f.writestr("test_gum/test_lizard.py", "") finally: f.close() z.chmod(256) testdir.makepyfile(""" import sys sys.path.append(%r) import test_gum.test_lizard""" % (z_fn,)) assert testdir.runpytest().ret == 0 def test_readonly(self, testdir): sub = testdir.mkdir("testing") sub.join("test_readonly.py").write( py.builtin._totext(""" def test_rewritten(): assert "@py_builtins" in globals() """).encode("utf-8"), "wb") old_mode = sub.stat().mode sub.chmod(320) try: assert testdir.runpytest().ret == 0 finally: sub.chmod(old_mode) def test_dont_write_bytecode(self, testdir, monkeypatch): testdir.makepyfile(""" import os def test_no_bytecode(): assert "__pycache__" in __cached__ assert not os.path.exists(__cached__) assert not os.path.exists(os.path.dirname(__cached__))""") monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1") assert testdir.runpytest().ret == 0 @pytest.mark.skipif('"__pypy__" in sys.modules') def test_pyc_vs_pyo(self, testdir, monkeypatch): testdir.makepyfile(""" import pytest def test_optimized(): "hello" assert test_optimized.__doc__ is None""" ) p = py.path.local.make_numbered_dir(prefix="runpytest-", keep=None, rootdir=testdir.tmpdir) tmp = "--basetemp=%s" % p monkeypatch.setenv("PYTHONOPTIMIZE", "2") monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False) assert testdir.runpybin("py.test", tmp).ret == 0 tagged = "test_pyc_vs_pyo." 
+ PYTEST_TAG assert tagged + ".pyo" in os.listdir("__pycache__") monkeypatch.undo() monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False) assert testdir.runpybin("py.test", tmp).ret == 1 assert tagged + ".pyc" in os.listdir("__pycache__") def test_package(self, testdir): pkg = testdir.tmpdir.join("pkg") pkg.mkdir() pkg.join("__init__.py").ensure() pkg.join("test_blah.py").write(""" def test_rewritten(): assert "@py_builtins" in globals()""") assert testdir.runpytest().ret == 0 def test_translate_newlines(self, testdir): content = "def test_rewritten():\r\n assert '@py_builtins' in globals()" b = content.encode("utf-8") testdir.tmpdir.join("test_newlines.py").write(b, "wb") assert testdir.runpytest().ret == 0 class TestAssertionRewriteHookDetails(object): def test_loader_is_package_false_for_module(self, testdir): testdir.makepyfile(test_fun=""" def test_loader(): assert not __loader__.is_package(__name__) """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "* 1 passed*", ]) def test_loader_is_package_true_for_package(self, testdir): testdir.makepyfile(test_fun=""" def test_loader(): assert not __loader__.is_package(__name__) def test_fun(): assert __loader__.is_package('fun') def test_missing(): assert not __loader__.is_package('pytest_not_there') """) testdir.mkpydir('fun') result = testdir.runpytest() result.stdout.fnmatch_lines([ '* 3 passed*', ]) @pytest.mark.skipif("sys.version_info[0] >= 3") @pytest.mark.xfail("hasattr(sys, 'pypy_translation_info')") def test_assume_ascii(self, testdir): content = "u'\xe2\x99\xa5\x01\xfe'" testdir.tmpdir.join("test_encoding.py").write(content, "wb") res = testdir.runpytest() assert res.ret != 0 assert "SyntaxError: Non-ASCII character" in res.stdout.str() @pytest.mark.skipif("sys.version_info[0] >= 3") def test_detect_coding_cookie(self, testdir): testdir.tmpdir.join("test_cookie.py").write("""# -*- coding: utf-8 -*- u"St\xc3\xa4d" def test_rewritten(): assert "@py_builtins" in globals()""", "wb") assert 
testdir.runpytest().ret == 0 @pytest.mark.skipif("sys.version_info[0] >= 3") def test_detect_coding_cookie_second_line(self, testdir): testdir.tmpdir.join("test_cookie.py").write("""#!/usr/bin/env python # -*- coding: utf-8 -*- u"St\xc3\xa4d" def test_rewritten(): assert "@py_builtins" in globals()""", "wb") assert testdir.runpytest().ret == 0 @pytest.mark.skipif("sys.version_info[0] >= 3") def test_detect_coding_cookie_crlf(self, testdir): testdir.tmpdir.join("test_cookie.py").write("""#!/usr/bin/env python # -*- coding: utf-8 -*- u"St\xc3\xa4d" def test_rewritten(): assert "@py_builtins" in globals()""".replace("\n", "\r\n"), "wb") assert testdir.runpytest().ret == 0 def test_write_pyc(self, testdir, tmpdir, monkeypatch): from _pytest.assertion.rewrite import _write_pyc from _pytest.assertion import AssertionState try: import __builtin__ as b except ImportError: import builtins as b config = testdir.parseconfig([]) state = AssertionState(config, "rewrite") source_path = tmpdir.ensure("source.py") pycpath = tmpdir.join("pyc").strpath assert _write_pyc(state, [1], source_path, pycpath) def open(*args): e = IOError() e.errno = 10 raise e monkeypatch.setattr(b, "open", open) assert not _write_pyc(state, [1], source_path, pycpath) def test_resources_provider_for_loader(self, testdir): """ Attempts to load resources from a package should succeed normally, even when the AssertionRewriteHook is used to load the modules. See #366 for details. """ pytest.importorskip("pkg_resources") testdir.mkpydir('testpkg') contents = { 'testpkg/test_pkg': """ import pkg_resources import pytest from _pytest.assertion.rewrite import AssertionRewritingHook def test_load_resource(): assert isinstance(__loader__, AssertionRewritingHook) res = pkg_resources.resource_string(__name__, 'resource.txt') res = res.decode('ascii') assert res == 'Load me please.' 
""", } testdir.makepyfile(**contents) testdir.maketxtfile(**{'testpkg/resource': "Load me please."}) result = testdir.runpytest() result.stdout.fnmatch_lines([ '* 1 passed*', ]) pytest-2.5.1/testing/test_resultlog.py0000664000175000017500000001613712254002202017542 0ustar hpkhpk00000000000000import py, pytest import os from _pytest.resultlog import generic_path, ResultLog, \ pytest_configure, pytest_unconfigure from _pytest.main import Node, Item, FSCollector def test_generic_path(testdir): from _pytest.main import Session config = testdir.parseconfig() session = Session(config) p1 = Node('a', config=config, session=session) #assert p1.fspath is None p2 = Node('B', parent=p1) p3 = Node('()', parent = p2) item = Item('c', parent = p3) res = generic_path(item) assert res == 'a.B().c' p0 = FSCollector('proj/test', config=config, session=session) p1 = FSCollector('proj/test/a', parent=p0) p2 = Node('B', parent=p1) p3 = Node('()', parent = p2) p4 = Node('c', parent=p3) item = Item('[1]', parent = p4) res = generic_path(item) assert res == 'test/a:B().c[1]' def test_write_log_entry(): reslog = ResultLog(None, None) reslog.logfile = py.io.TextIO() reslog.write_log_entry('name', '.', '') entry = reslog.logfile.getvalue() assert entry[-1] == '\n' entry_lines = entry.splitlines() assert len(entry_lines) == 1 assert entry_lines[0] == '. 
name' reslog.logfile = py.io.TextIO() reslog.write_log_entry('name', 's', 'Skipped') entry = reslog.logfile.getvalue() assert entry[-1] == '\n' entry_lines = entry.splitlines() assert len(entry_lines) == 2 assert entry_lines[0] == 's name' assert entry_lines[1] == ' Skipped' reslog.logfile = py.io.TextIO() reslog.write_log_entry('name', 's', 'Skipped\n') entry = reslog.logfile.getvalue() assert entry[-1] == '\n' entry_lines = entry.splitlines() assert len(entry_lines) == 2 assert entry_lines[0] == 's name' assert entry_lines[1] == ' Skipped' reslog.logfile = py.io.TextIO() longrepr = ' tb1\n tb 2\nE tb3\nSome Error' reslog.write_log_entry('name', 'F', longrepr) entry = reslog.logfile.getvalue() assert entry[-1] == '\n' entry_lines = entry.splitlines() assert len(entry_lines) == 5 assert entry_lines[0] == 'F name' assert entry_lines[1:] == [' '+line for line in longrepr.splitlines()] class TestWithFunctionIntegration: # XXX (hpk) i think that the resultlog plugin should # provide a Parser object so that one can remain # ignorant regarding formatting details. 
def getresultlog(self, testdir, arg): resultlog = testdir.tmpdir.join("resultlog") testdir.plugins.append("resultlog") args = ["--resultlog=%s" % resultlog] + [arg] testdir.runpytest(*args) return [x for x in resultlog.readlines(cr=0) if x] def test_collection_report(self, testdir): ok = testdir.makepyfile(test_collection_ok="") skip = testdir.makepyfile(test_collection_skip= "import pytest ; pytest.skip('hello')") fail = testdir.makepyfile(test_collection_fail="XXX") lines = self.getresultlog(testdir, ok) assert not lines lines = self.getresultlog(testdir, skip) assert len(lines) == 2 assert lines[0].startswith("S ") assert lines[0].endswith("test_collection_skip.py") assert lines[1].startswith(" ") assert lines[1].endswith("test_collection_skip.py:1: Skipped: hello") lines = self.getresultlog(testdir, fail) assert lines assert lines[0].startswith("F ") assert lines[0].endswith("test_collection_fail.py"), lines[0] for x in lines[1:]: assert x.startswith(" ") assert "XXX" in "".join(lines[1:]) def test_log_test_outcomes(self, testdir): mod = testdir.makepyfile(test_mod=""" import pytest def test_pass(): pass def test_skip(): pytest.skip("hello") def test_fail(): raise ValueError("FAIL") @pytest.mark.xfail def test_xfail(): raise ValueError("XFAIL") @pytest.mark.xfail def test_xpass(): pass """) lines = self.getresultlog(testdir, mod) assert len(lines) >= 3 assert lines[0].startswith(". 
") assert lines[0].endswith("test_pass") assert lines[1].startswith("s "), lines[1] assert lines[1].endswith("test_skip") assert lines[2].find("hello") != -1 assert lines[3].startswith("F ") assert lines[3].endswith("test_fail") tb = "".join(lines[4:8]) assert tb.find('raise ValueError("FAIL")') != -1 assert lines[8].startswith('x ') tb = "".join(lines[8:14]) assert tb.find('raise ValueError("XFAIL")') != -1 assert lines[14].startswith('X ') assert len(lines) == 15 @pytest.mark.parametrize("style", ("native", "long", "short")) def test_internal_exception(self, style): # they are produced for example by a teardown failing # at the end of the run or a failing hook invocation try: raise ValueError except ValueError: excinfo = py.code.ExceptionInfo() reslog = ResultLog(None, py.io.TextIO()) reslog.pytest_internalerror(excinfo.getrepr(style=style)) entry = reslog.logfile.getvalue() entry_lines = entry.splitlines() assert entry_lines[0].startswith('! ') if style != "native": assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class assert entry_lines[-1][0] == ' ' assert 'ValueError' in entry def test_generic(testdir, LineMatcher): testdir.plugins.append("resultlog") testdir.makepyfile(""" import pytest def test_pass(): pass def test_fail(): assert 0 def test_skip(): pytest.skip("") @pytest.mark.xfail def test_xfail(): assert 0 @pytest.mark.xfail(run=False) def test_xfail_norun(): assert 0 """) testdir.runpytest("--resultlog=result.log") lines = testdir.tmpdir.join("result.log").readlines(cr=0) LineMatcher(lines).fnmatch_lines([ ". 
*:test_pass", "F *:test_fail", "s *:test_skip", "x *:test_xfail", "x *:test_xfail_norun", ]) def test_no_resultlog_on_slaves(testdir): config = testdir.parseconfig("-p", "resultlog", "--resultlog=resultlog") assert not hasattr(config, '_resultlog') pytest_configure(config) assert hasattr(config, '_resultlog') pytest_unconfigure(config) assert not hasattr(config, '_resultlog') config.slaveinput = {} pytest_configure(config) assert not hasattr(config, '_resultlog') pytest_unconfigure(config) assert not hasattr(config, '_resultlog') def test_failure_issue380(testdir): testdir.makeconftest(""" import pytest class MyCollector(pytest.File): def collect(self): raise ValueError() def repr_failure(self, excinfo): return "somestring" def pytest_collect_file(path, parent): return MyCollector(parent=parent, fspath=path) """) testdir.makepyfile(""" def test_func(): pass """) result = testdir.runpytest("--resultlog=log") assert result.ret == 1 pytest-2.5.1/testing/conftest.py0000664000175000017500000000626112254002202016305 0ustar hpkhpk00000000000000import pytest import sys pytest_plugins = "pytester", import os, py pid = os.getpid() def pytest_addoption(parser): parser.addoption('--lsof', action="store_true", dest="lsof", default=False, help=("run FD checks if lsof is available")) def pytest_configure(config): config._basedir = py.path.local() if config.getvalue("lsof"): try: out = py.process.cmdexec("lsof -p %d" % pid) except py.process.cmdexec.Error: pass else: config._numfiles = len(getopenfiles(out)) #def pytest_report_header(): # return "pid: %s" % os.getpid() def getopenfiles(out): def isopen(line): return ("REG" in line or "CHR" in line) and ( "deleted" not in line and 'mem' not in line and "txt" not in line) return [x for x in out.split("\n") if isopen(x)] def check_open_files(config): out2 = py.process.cmdexec("lsof -p %d" % pid) lines2 = getopenfiles(out2) if len(lines2) > config._numfiles + 3: error = [] error.append("***** %s FD leackage detected" % 
(len(lines2)-config._numfiles)) error.extend(lines2) error.append(error[0]) # update numfile so that the overall test run continuess config._numfiles = len(lines2) raise AssertionError("\n".join(error)) def pytest_runtest_teardown(item, __multicall__): item.config._basedir.chdir() if hasattr(item.config, '_numfiles'): x = __multicall__.execute() check_open_files(item.config) return x # XXX copied from execnet's conftest.py - needs to be merged winpymap = { 'python2.7': r'C:\Python27\python.exe', 'python2.6': r'C:\Python26\python.exe', 'python2.5': r'C:\Python25\python.exe', 'python2.4': r'C:\Python24\python.exe', 'python3.1': r'C:\Python31\python.exe', 'python3.2': r'C:\Python32\python.exe', 'python3.3': r'C:\Python33\python.exe', 'python3.4': r'C:\Python34\python.exe', } def getexecutable(name, cache={}): try: return cache[name] except KeyError: executable = py.path.local.sysfind(name) if executable: if name == "jython": import subprocess popen = subprocess.Popen([str(executable), "--version"], universal_newlines=True, stderr=subprocess.PIPE) out, err = popen.communicate() if not err or "2.5" not in err: executable = None if "2.5.2" in err: executable = None # http://bugs.jython.org/issue1790 cache[name] = executable return executable @pytest.fixture(params=['python2.5', 'python2.6', 'python2.7', 'python3.2', "python3.3", 'pypy', 'jython']) def anypython(request): name = request.param executable = getexecutable(name) if executable is None: if sys.platform == "win32": executable = winpymap.get(name, None) if executable: executable = py.path.local(executable) if executable.check(): return executable pytest.skip("no suitable %s found" % (name,)) return executable pytest-2.5.1/testing/test_helpconfig.py0000664000175000017500000000440212254002202017630 0ustar hpkhpk00000000000000import py, pytest from _pytest.helpconfig import collectattr def test_version(testdir, pytestconfig): result = testdir.runpytest("--version") assert result.ret == 0 #p = 
py.path.local(py.__file__).dirpath() result.stderr.fnmatch_lines([ '*py.test*%s*imported from*' % (pytest.__version__, ) ]) if pytestconfig.pluginmanager._plugin_distinfo: result.stderr.fnmatch_lines([ "*setuptools registered plugins:", "*at*", ]) def test_help(testdir): result = testdir.runpytest("--help") assert result.ret == 0 result.stdout.fnmatch_lines(""" *-v*verbose* *setup.cfg* *minversion* *to see*markers*py.test --markers* *to see*fixtures*py.test --fixtures* """) def test_collectattr(): class A: def pytest_hello(self): pass class B(A): def pytest_world(self): pass methods = py.builtin.sorted(collectattr(B)) assert list(methods) == ['pytest_hello', 'pytest_world'] methods = py.builtin.sorted(collectattr(B())) assert list(methods) == ['pytest_hello', 'pytest_world'] def test_hookvalidation_unknown(testdir): testdir.makeconftest(""" def pytest_hello(xyz): pass """) result = testdir.runpytest() assert result.ret != 0 result.stderr.fnmatch_lines([ '*unknown hook*pytest_hello*' ]) def test_hookvalidation_optional(testdir): testdir.makeconftest(""" import pytest @pytest.mark.optionalhook def pytest_hello(xyz): pass """) result = testdir.runpytest() assert result.ret == 0 def test_traceconfig(testdir): result = testdir.runpytest("--traceconfig") result.stdout.fnmatch_lines([ "*using*pytest*py*", "*active plugins*", ]) def test_debug(testdir, monkeypatch): result = testdir.runpytest("--debug") assert result.ret == 0 p = testdir.tmpdir.join("pytestdebug.log") assert "pytest_sessionstart" in p.read() def test_PYTEST_DEBUG(testdir, monkeypatch): monkeypatch.setenv("PYTEST_DEBUG", "1") result = testdir.runpytest() assert result.ret == 0 result.stderr.fnmatch_lines([ "*pytest_plugin_registered*", "*manager*PluginManager*" ]) pytest-2.5.1/testing/test_terminal.py0000664000175000017500000005461712254002202017342 0ustar hpkhpk00000000000000""" terminal reporting of the full testing process. 
""" import pytest, py import sys from _pytest.terminal import TerminalReporter, repr_pythonversion, getreportopt from _pytest import runner def basic_run_report(item): runner.call_and_report(item, "setup", log=False) return runner.call_and_report(item, "call", log=False) class Option: def __init__(self, verbose=False, fulltrace=False): self.verbose = verbose self.fulltrace = fulltrace @property def args(self): l = [] if self.verbose: l.append('-v') if self.fulltrace: l.append('--fulltrace') return l def pytest_generate_tests(metafunc): if "option" in metafunc.fixturenames: metafunc.addcall(id="default", funcargs={'option': Option(verbose=False)}) metafunc.addcall(id="verbose", funcargs={'option': Option(verbose=True)}) metafunc.addcall(id="quiet", funcargs={'option': Option(verbose= -1)}) metafunc.addcall(id="fulltrace", funcargs={'option': Option(fulltrace=True)}) class TestTerminal: def test_pass_skip_fail(self, testdir, option): testdir.makepyfile(""" import pytest def test_ok(): pass def test_skip(): pytest.skip("xx") def test_func(): assert 0 """) result = testdir.runpytest(*option.args) if option.verbose: result.stdout.fnmatch_lines([ "*test_pass_skip_fail.py:2: *test_ok*PASS*", "*test_pass_skip_fail.py:4: *test_skip*SKIP*", "*test_pass_skip_fail.py:6: *test_func*FAIL*", ]) else: result.stdout.fnmatch_lines([ "*test_pass_skip_fail.py .sF" ]) result.stdout.fnmatch_lines([ " def test_func():", "> assert 0", "E assert 0", ]) def test_internalerror(self, testdir, linecomp): modcol = testdir.getmodulecol("def test_one(): pass") rep = TerminalReporter(modcol.config, file=linecomp.stringio) excinfo = pytest.raises(ValueError, "raise ValueError('hello')") rep.pytest_internalerror(excinfo.getrepr()) linecomp.assert_contains_lines([ "INTERNALERROR> *ValueError*hello*" ]) def test_writeline(self, testdir, linecomp): modcol = testdir.getmodulecol("def test_one(): pass") rep = TerminalReporter(modcol.config, file=linecomp.stringio) 
rep.write_fspath_result(py.path.local("xy.py"), '.') rep.write_line("hello world") lines = linecomp.stringio.getvalue().split('\n') assert not lines[0] assert lines[1].endswith("xy.py .") assert lines[2] == "hello world" def test_show_runtest_logstart(self, testdir, linecomp): item = testdir.getitem("def test_func(): pass") tr = TerminalReporter(item.config, file=linecomp.stringio) item.config.pluginmanager.register(tr) location = item.reportinfo() tr.config.hook.pytest_runtest_logstart(nodeid=item.nodeid, location=location, fspath=str(item.fspath)) linecomp.assert_contains_lines([ "*test_show_runtest_logstart.py*" ]) def test_runtest_location_shown_before_test_starts(self, testdir): testdir.makepyfile(""" def test_1(): import time time.sleep(20) """) child = testdir.spawn_pytest("") child.expect(".*test_runtest_location.*py") child.sendeof() child.kill(15) def test_itemreport_subclasses_show_subclassed_file(self, testdir): testdir.makepyfile(test_p1=""" class BaseTests: def test_p1(self): pass class TestClass(BaseTests): pass """) p2 = testdir.makepyfile(test_p2=""" from test_p1 import BaseTests class TestMore(BaseTests): pass """) result = testdir.runpytest(p2) result.stdout.fnmatch_lines([ "*test_p2.py .", "*1 passed*", ]) result = testdir.runpytest("-v", p2) result.stdout.fnmatch_lines([ "*test_p2.py <- *test_p1.py:2: TestMore.test_p1*", ]) def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir): a = testdir.mkpydir("a") a.join("test_hello.py").write(py.code.Source(""" class TestClass: def test_method(self): pass """)) result = testdir.runpytest("-v") assert result.ret == 0 result.stdout.fnmatch_lines([ "*a/test_hello.py*PASS*", ]) assert " <- " not in result.stdout.str() def test_keyboard_interrupt(self, testdir, option): testdir.makepyfile(""" def test_foobar(): assert 0 def test_spamegg(): import py; pytest.skip('skip me please!') def test_interrupt_me(): raise KeyboardInterrupt # simulating the user """) result = 
testdir.runpytest(*option.args) result.stdout.fnmatch_lines([ " def test_foobar():", "> assert 0", "E assert 0", "*_keyboard_interrupt.py:6: KeyboardInterrupt*", ]) if option.fulltrace: result.stdout.fnmatch_lines([ "*raise KeyboardInterrupt # simulating the user*", ]) result.stdout.fnmatch_lines(['*KeyboardInterrupt*']) def test_keyboard_in_sessionstart(self, testdir): testdir.makeconftest(""" def pytest_sessionstart(): raise KeyboardInterrupt """) testdir.makepyfile(""" def test_foobar(): pass """) result = testdir.runpytest() assert result.ret == 2 result.stdout.fnmatch_lines(['*KeyboardInterrupt*']) class TestCollectonly: def test_collectonly_basic(self, testdir): testdir.makepyfile(""" def test_func(): pass """) result = testdir.runpytest("--collect-only",) result.stdout.fnmatch_lines([ "", " ", ]) def test_collectonly_skipped_module(self, testdir): testdir.makepyfile(""" import pytest pytest.skip("hello") """) result = testdir.runpytest("--collect-only", "-rs") result.stdout.fnmatch_lines([ "SKIP*hello*", "*1 skip*", ]) def test_collectonly_failed_module(self, testdir): testdir.makepyfile("""raise ValueError(0)""") result = testdir.runpytest("--collect-only") result.stdout.fnmatch_lines([ "*raise ValueError*", "*1 error*", ]) def test_collectonly_fatal(self, testdir): testdir.makeconftest(""" def pytest_collectstart(collector): assert 0, "urgs" """) result = testdir.runpytest("--collect-only") result.stdout.fnmatch_lines([ "*INTERNAL*args*" ]) assert result.ret == 3 def test_collectonly_simple(self, testdir): p = testdir.makepyfile(""" def test_func1(): pass class TestClass: def test_method(self): pass """) result = testdir.runpytest("--collect-only", p) #assert stderr.startswith("inserting into sys.path") assert result.ret == 0 result.stdout.fnmatch_lines([ "*", "* ", "* ", #"* ", "* ", ]) def test_collectonly_error(self, testdir): p = testdir.makepyfile("import Errlkjqweqwe") result = testdir.runpytest("--collect-only", p) assert result.ret == 1 
result.stdout.fnmatch_lines(py.code.Source(""" *ERROR* *import Errlk* *ImportError* *1 error* """).strip()) def test_collectonly_missing_path(self, testdir): """this checks issue 115, failure in parseargs will cause session not to have the items attribute """ result = testdir.runpytest("--collect-only", "uhm_missing_path") assert result.ret == 4 result.stderr.fnmatch_lines([ '*ERROR: file not found*', ]) def test_collectonly_quiet(self, testdir): testdir.makepyfile("def test_foo(): pass") result = testdir.runpytest("--collect-only", "-q") result.stdout.fnmatch_lines([ '*test_foo*', ]) def test_collectonly_more_quiet(self, testdir): testdir.makepyfile(test_fun="def test_foo(): pass") result = testdir.runpytest("--collect-only", "-qq") result.stdout.fnmatch_lines([ '*test_fun.py: 1*', ]) def test_repr_python_version(monkeypatch): try: monkeypatch.setattr(sys, 'version_info', (2, 5, 1, 'final', 0)) assert repr_pythonversion() == "2.5.1-final-0" py.std.sys.version_info = x = (2, 3) assert repr_pythonversion() == str(x) finally: monkeypatch.undo() # do this early as pytest can get confused class TestFixtureReporting: def test_setup_fixture_error(self, testdir): testdir.makepyfile(""" def setup_function(function): print ("setup func") assert 0 def test_nada(): pass """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*ERROR at setup of test_nada*", "*setup_function(function):*", "*setup func*", "*assert 0*", "*1 error*", ]) assert result.ret != 0 def test_teardown_fixture_error(self, testdir): testdir.makepyfile(""" def test_nada(): pass def teardown_function(function): print ("teardown func") assert 0 """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*ERROR at teardown*", "*teardown_function(function):*", "*assert 0*", "*Captured stdout*", "*teardown func*", "*1 passed*1 error*", ]) def test_teardown_fixture_error_and_test_failure(self, testdir): testdir.makepyfile(""" def test_fail(): assert 0, "failingfunc" def teardown_function(function): 
print ("teardown func") assert False """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*ERROR at teardown of test_fail*", "*teardown_function(function):*", "*assert False*", "*Captured stdout*", "*teardown func*", "*test_fail*", "*def test_fail():", "*failingfunc*", "*1 failed*1 error*", ]) class TestTerminalFunctional: def test_deselected(self, testdir): testpath = testdir.makepyfile(""" def test_one(): pass def test_two(): pass def test_three(): pass """ ) result = testdir.runpytest("-k", "test_two:", testpath) result.stdout.fnmatch_lines([ "*test_deselected.py ..", "=* 1 test*deselected by*test_two:*=", ]) assert result.ret == 0 def test_no_skip_summary_if_failure(self, testdir): testdir.makepyfile(""" import pytest def test_ok(): pass def test_fail(): assert 0 def test_skip(): pytest.skip("dontshow") """) result = testdir.runpytest() assert result.stdout.str().find("skip test summary") == -1 assert result.ret == 1 def test_passes(self, testdir): p1 = testdir.makepyfile(""" def test_passes(): pass class TestClass: def test_method(self): pass """) old = p1.dirpath().chdir() try: result = testdir.runpytest() finally: old.chdir() result.stdout.fnmatch_lines([ "test_passes.py ..", "* 2 pass*", ]) assert result.ret == 0 def test_header_trailer_info(self, testdir): testdir.makepyfile(""" def test_passes(): pass """) result = testdir.runpytest() verinfo = ".".join(map(str, py.std.sys.version_info[:3])) result.stdout.fnmatch_lines([ "*===== test session starts ====*", "platform %s -- Python %s*" % ( py.std.sys.platform, verinfo), # , py.std.sys.executable), "*test_header_trailer_info.py .", "=* 1 passed in *.[0-9][0-9] seconds *=", ]) if pytest.config.pluginmanager._plugin_distinfo: result.stdout.fnmatch_lines([ "plugins: *", ]) def test_showlocals(self, testdir): p1 = testdir.makepyfile(""" def test_showlocals(): x = 3 y = "x" * 5000 assert 0 """) result = testdir.runpytest(p1, '-l') result.stdout.fnmatch_lines([ #"_ _ * Locals *", "x* = 3", "y* = 
'xxxxxx*" ]) def test_verbose_reporting(self, testdir, pytestconfig): p1 = testdir.makepyfile(""" import pytest def test_fail(): raise ValueError() def test_pass(): pass class TestClass: def test_skip(self): pytest.skip("hello") def test_gen(): def check(x): assert x == 1 yield check, 0 """) result = testdir.runpytest(p1, '-v') result.stdout.fnmatch_lines([ "*test_verbose_reporting.py:2: test_fail*FAIL*", "*test_verbose_reporting.py:4: test_pass*PASS*", "*test_verbose_reporting.py:7: TestClass.test_skip*SKIP*", "*test_verbose_reporting.py:10: test_gen*FAIL*", ]) assert result.ret == 1 pytestconfig.pluginmanager.skipifmissing("xdist") result = testdir.runpytest(p1, '-v', '-n 1') result.stdout.fnmatch_lines([ "*FAIL*test_verbose_reporting.py:2: test_fail*", ]) assert result.ret == 1 def test_quiet_reporting(self, testdir): p1 = testdir.makepyfile("def test_pass(): pass") result = testdir.runpytest(p1, '-q') s = result.stdout.str() assert 'test session starts' not in s assert p1.basename not in s assert "===" not in s assert "passed" in s def test_more_quiet_reporting(self, testdir): p1 = testdir.makepyfile("def test_pass(): pass") result = testdir.runpytest(p1, '-qq') s = result.stdout.str() assert 'test session starts' not in s assert p1.basename not in s assert "===" not in s assert "passed" not in s def test_fail_extra_reporting(testdir): testdir.makepyfile("def test_this(): assert 0") result = testdir.runpytest() assert 'short test summary' not in result.stdout.str() result = testdir.runpytest('-rf') result.stdout.fnmatch_lines([ "*test summary*", "FAIL*test_fail_extra_reporting*", ]) def test_fail_reporting_on_pass(testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest('-rf') assert 'short test summary' not in result.stdout.str() def test_color_yes(testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest('--color=yes') assert 'test session starts' in result.stdout.str() assert '\x1b[1m' in 
result.stdout.str() def test_color_no(testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest('--color=no') assert 'test session starts' in result.stdout.str() assert '\x1b[1m' not in result.stdout.str() def test_getreportopt(): class config: class option: reportchars = "" config.option.report = "xfailed" assert getreportopt(config) == "x" config.option.report = "xfailed,skipped" assert getreportopt(config) == "xs" config.option.report = "skipped,xfailed" assert getreportopt(config) == "sx" config.option.report = "skipped" config.option.reportchars = "sf" assert getreportopt(config) == "sf" config.option.reportchars = "sfx" assert getreportopt(config) == "sfx" def test_terminalreporter_reportopt_addopts(testdir): testdir.makeini("[pytest]\naddopts=-rs") testdir.makepyfile(""" def pytest_funcarg__tr(request): tr = request.config.pluginmanager.getplugin("terminalreporter") return tr def test_opt(tr): assert tr.hasopt('skipped') assert not tr.hasopt('qwe') """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*1 passed*" ]) def test_tbstyle_short(testdir): p = testdir.makepyfile(""" def pytest_funcarg__arg(request): return 42 def test_opt(arg): x = 0 assert x """) result = testdir.runpytest("--tb=short") s = result.stdout.str() assert 'arg = 42' not in s assert 'x = 0' not in s result.stdout.fnmatch_lines([ "*%s:5*" % p.basename, ">*assert x", "E*assert*", ]) result = testdir.runpytest() s = result.stdout.str() assert 'x = 0' in s assert 'assert x' in s def test_traceconfig(testdir, monkeypatch): result = testdir.runpytest("--traceconfig") result.stdout.fnmatch_lines([ "*active plugins*" ]) assert result.ret == 0 class TestGenericReporting: """ this test class can be subclassed with a different option provider to run e.g. distributed tests. 
""" def test_collect_fail(self, testdir, option): testdir.makepyfile("import xyz\n") result = testdir.runpytest(*option.args) result.stdout.fnmatch_lines([ "> import xyz", "E ImportError: No module named *xyz*", "*1 error*", ]) def test_maxfailures(self, testdir, option): testdir.makepyfile(""" def test_1(): assert 0 def test_2(): assert 0 def test_3(): assert 0 """) result = testdir.runpytest("--maxfail=2", *option.args) result.stdout.fnmatch_lines([ "*def test_1():*", "*def test_2():*", "*!! Interrupted: stopping after 2 failures*!!*", "*2 failed*", ]) def test_tb_option(self, testdir, option): testdir.makepyfile(""" import pytest def g(): raise IndexError def test_func(): print (6*7) g() # --calling-- """) for tbopt in ["long", "short", "no"]: print('testing --tb=%s...' % tbopt) result = testdir.runpytest('--tb=%s' % tbopt) s = result.stdout.str() if tbopt == "long": assert 'print (6*7)' in s else: assert 'print (6*7)' not in s if tbopt != "no": assert '--calling--' in s assert 'IndexError' in s else: assert 'FAILURES' not in s assert '--calling--' not in s assert 'IndexError' not in s def test_tb_crashline(self, testdir, option): p = testdir.makepyfile(""" import pytest def g(): raise IndexError def test_func1(): print (6*7) g() # --calling-- def test_func2(): assert 0, "hello" """) result = testdir.runpytest("--tb=line") bn = p.basename result.stdout.fnmatch_lines([ "*%s:3: IndexError*" % bn, "*%s:8: AssertionError: hello*" % bn, ]) s = result.stdout.str() assert "def test_func2" not in s def test_pytest_report_header(self, testdir, option): testdir.makeconftest(""" def pytest_sessionstart(session): session.config._somevalue = 42 def pytest_report_header(config): return "hello: %s" % config._somevalue """) testdir.mkdir("a").join("conftest.py").write(""" def pytest_report_header(config, startdir): return ["line1", str(startdir)] """) result = testdir.runpytest("a") result.stdout.fnmatch_lines([ "*hello: 42*", "line1", str(testdir.tmpdir), ]) 
@pytest.mark.xfail("not hasattr(os, 'dup')") def test_fdopen_kept_alive_issue124(testdir): testdir.makepyfile(""" import os, sys k = [] def test_open_file_and_keep_alive(capfd): stdout = os.fdopen(1, 'w', 1) k.append(stdout) def test_close_kept_alive_file(): stdout = k.pop() stdout.close() """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*2 passed*" ]) def test_tbstyle_native_setup_error(testdir): testdir.makepyfile(""" import pytest @pytest.fixture def setup_error_fixture(): raise Exception("error in exception") def test_error_fixture(setup_error_fixture): pass """) result = testdir.runpytest("--tb=native") result.stdout.fnmatch_lines([ '*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*' ]) def test_terminal_summary(testdir): testdir.makeconftest(""" def pytest_terminal_summary(terminalreporter): w = terminalreporter w.section("hello") w.line("world") """) result = testdir.runpytest() result.stdout.fnmatch_lines(""" *==== hello ====* world """) pytest-2.5.1/testing/test_collection.py0000664000175000017500000005133412254002202017653 0ustar hpkhpk00000000000000import pytest, py from _pytest.main import Session class TestCollector: def test_collect_versus_item(self): from pytest import Collector, Item assert not issubclass(Collector, Item) assert not issubclass(Item, Collector) def test_compat_attributes(self, testdir, recwarn): modcol = testdir.getmodulecol(""" def test_pass(): pass def test_fail(): assert 0 """) recwarn.clear() assert modcol.Module == pytest.Module assert modcol.Class == pytest.Class assert modcol.Item == pytest.Item assert modcol.File == pytest.File assert modcol.Function == pytest.Function def test_check_equality(self, testdir): modcol = testdir.getmodulecol(""" def test_pass(): pass def test_fail(): assert 0 """) fn1 = testdir.collect_by_name(modcol, "test_pass") assert isinstance(fn1, pytest.Function) fn2 = testdir.collect_by_name(modcol, "test_pass") assert isinstance(fn2, pytest.Function) assert fn1 
== fn2 assert fn1 != modcol if py.std.sys.version_info < (3, 0): assert cmp(fn1, fn2) == 0 assert hash(fn1) == hash(fn2) fn3 = testdir.collect_by_name(modcol, "test_fail") assert isinstance(fn3, pytest.Function) assert not (fn1 == fn3) assert fn1 != fn3 for fn in fn1,fn2,fn3: assert fn != 3 assert fn != modcol assert fn != [1,2,3] assert [1,2,3] != fn assert modcol != fn def test_getparent(self, testdir): modcol = testdir.getmodulecol(""" class TestClass: def test_foo(): pass """) cls = testdir.collect_by_name(modcol, "TestClass") fn = testdir.collect_by_name( testdir.collect_by_name(cls, "()"), "test_foo") parent = fn.getparent(pytest.Module) assert parent is modcol parent = fn.getparent(pytest.Function) assert parent is fn parent = fn.getparent(pytest.Class) assert parent is cls def test_getcustomfile_roundtrip(self, testdir): hello = testdir.makefile(".xxx", hello="world") testdir.makepyfile(conftest=""" import pytest class CustomFile(pytest.File): pass def pytest_collect_file(path, parent): if path.ext == ".xxx": return CustomFile(path, parent=parent) """) node = testdir.getpathnode(hello) assert isinstance(node, pytest.File) assert node.name == "hello.xxx" nodes = node.session.perform_collect([node.nodeid], genitems=False) assert len(nodes) == 1 assert isinstance(nodes[0], pytest.File) class TestCollectFS: def test_ignored_certain_directories(self, testdir): tmpdir = testdir.tmpdir tmpdir.ensure("_darcs", 'test_notfound.py') tmpdir.ensure("CVS", 'test_notfound.py') tmpdir.ensure("{arch}", 'test_notfound.py') tmpdir.ensure(".whatever", 'test_notfound.py') tmpdir.ensure(".bzr", 'test_notfound.py') tmpdir.ensure("normal", 'test_found.py') for x in tmpdir.visit("test_*.py"): x.write("def test_hello(): pass") result = testdir.runpytest("--collect-only") s = result.stdout.str() assert "test_notfound" not in s assert "test_found" in s def test_custom_norecursedirs(self, testdir): testdir.makeini(""" [pytest] norecursedirs = mydir xyz* """) tmpdir = testdir.tmpdir 
tmpdir.ensure("mydir", "test_hello.py").write("def test_1(): pass") tmpdir.ensure("xyz123", "test_2.py").write("def test_2(): 0/0") tmpdir.ensure("xy", "test_ok.py").write("def test_3(): pass") rec = testdir.inline_run() rec.assertoutcome(passed=1) rec = testdir.inline_run("xyz123/test_2.py") rec.assertoutcome(failed=1) class TestCollectPluginHookRelay: def test_pytest_collect_file(self, testdir): wascalled = [] class Plugin: def pytest_collect_file(self, path, parent): wascalled.append(path) testdir.makefile(".abc", "xyz") pytest.main([testdir.tmpdir], plugins=[Plugin()]) assert len(wascalled) == 1 assert wascalled[0].ext == '.abc' def test_pytest_collect_directory(self, testdir): wascalled = [] class Plugin: def pytest_collect_directory(self, path, parent): wascalled.append(path.basename) testdir.mkdir("hello") testdir.mkdir("world") pytest.main(testdir.tmpdir, plugins=[Plugin()]) assert "hello" in wascalled assert "world" in wascalled class TestPrunetraceback: def test_collection_error(self, testdir): p = testdir.makepyfile(""" import not_exists """) result = testdir.runpytest(p) assert "__import__" not in result.stdout.str(), "too long traceback" result.stdout.fnmatch_lines([ "*ERROR collecting*", "*mport*not_exists*" ]) def test_custom_repr_failure(self, testdir): p = testdir.makepyfile(""" import not_exists """) testdir.makeconftest(""" import pytest def pytest_collect_file(path, parent): return MyFile(path, parent) class MyError(Exception): pass class MyFile(pytest.File): def collect(self): raise MyError() def repr_failure(self, excinfo): if excinfo.errisinstance(MyError): return "hello world" return pytest.File.repr_failure(self, excinfo) """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*ERROR collecting*", "*hello world*", ]) @pytest.mark.xfail(reason="other mechanism for adding to reporting needed") def test_collect_report_postprocessing(self, testdir): p = testdir.makepyfile(""" import not_exists """) testdir.makeconftest(""" import 
pytest def pytest_make_collect_report(__multicall__): rep = __multicall__.execute() rep.headerlines += ["header1"] return rep """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*ERROR collecting*", "*header1*", ]) class TestCustomConftests: def test_ignore_collect_path(self, testdir): testdir.makeconftest(""" def pytest_ignore_collect(path, config): return path.basename.startswith("x") or \ path.basename == "test_one.py" """) sub = testdir.mkdir("xy123") sub.ensure("test_hello.py").write("syntax error") sub.join("conftest.py").write("syntax error") testdir.makepyfile("def test_hello(): pass") testdir.makepyfile(test_one="syntax error") result = testdir.runpytest("--fulltrace") assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) def test_ignore_collect_not_called_on_argument(self, testdir): testdir.makeconftest(""" def pytest_ignore_collect(path, config): return True """) p = testdir.makepyfile("def test_hello(): pass") result = testdir.runpytest(p) assert result.ret == 0 assert "1 passed" in result.stdout.str() result = testdir.runpytest() assert result.ret == 0 assert "1 passed" not in result.stdout.str() def test_collectignore_exclude_on_option(self, testdir): testdir.makeconftest(""" collect_ignore = ['hello', 'test_world.py'] def pytest_addoption(parser): parser.addoption("--XX", action="store_true", default=False) def pytest_configure(config): if config.getvalue("XX"): collect_ignore[:] = [] """) testdir.mkdir("hello") testdir.makepyfile(test_world="def test_hello(): pass") result = testdir.runpytest() assert result.ret == 0 assert "passed" not in result.stdout.str() result = testdir.runpytest("--XX") assert result.ret == 0 assert "passed" in result.stdout.str() def test_pytest_fs_collect_hooks_are_seen(self, testdir): testdir.makeconftest(""" import pytest class MyModule(pytest.Module): pass def pytest_collect_file(path, parent): if path.ext == ".py": return MyModule(path, parent) """) testdir.mkdir("sub") testdir.makepyfile("def 
test_x(): pass") result = testdir.runpytest("--collect-only") result.stdout.fnmatch_lines([ "*MyModule*", "*test_x*" ]) def test_pytest_collect_file_from_sister_dir(self, testdir): sub1 = testdir.mkpydir("sub1") sub2 = testdir.mkpydir("sub2") conf1 = testdir.makeconftest(""" import pytest class MyModule1(pytest.Module): pass def pytest_collect_file(path, parent): if path.ext == ".py": return MyModule1(path, parent) """) conf1.move(sub1.join(conf1.basename)) conf2 = testdir.makeconftest(""" import pytest class MyModule2(pytest.Module): pass def pytest_collect_file(path, parent): if path.ext == ".py": return MyModule2(path, parent) """) conf2.move(sub2.join(conf2.basename)) p = testdir.makepyfile("def test_x(): pass") p.copy(sub1.join(p.basename)) p.copy(sub2.join(p.basename)) result = testdir.runpytest("--collect-only") result.stdout.fnmatch_lines([ "*MyModule1*", "*MyModule2*", "*test_x*" ]) class TestSession: def test_parsearg(self, testdir): p = testdir.makepyfile("def test_func(): pass") subdir = testdir.mkdir("sub") subdir.ensure("__init__.py") target = subdir.join(p.basename) p.move(target) testdir.chdir() subdir.chdir() config = testdir.parseconfig(p.basename) rcol = Session(config=config) assert rcol.fspath == subdir parts = rcol._parsearg(p.basename) assert parts[0] == target assert len(parts) == 1 parts = rcol._parsearg(p.basename + "::test_func") assert parts[0] == target assert parts[1] == "test_func" assert len(parts) == 2 def test_collect_topdir(self, testdir): p = testdir.makepyfile("def test_func(): pass") id = "::".join([p.basename, "test_func"]) # XXX migrate to inline_genitems? 
(see below) config = testdir.parseconfig(id) topdir = testdir.tmpdir rcol = Session(config) assert topdir == rcol.fspath #rootid = rcol.nodeid #root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0] #assert root2 == rcol, rootid colitems = rcol.perform_collect([rcol.nodeid], genitems=False) assert len(colitems) == 1 assert colitems[0].fspath == p def test_collect_protocol_single_function(self, testdir): p = testdir.makepyfile("def test_func(): pass") id = "::".join([p.basename, "test_func"]) items, hookrec = testdir.inline_genitems(id) item, = items assert item.name == "test_func" newid = item.nodeid assert newid == id py.std.pprint.pprint(hookrec.hookrecorder.calls) topdir = testdir.tmpdir # noqa hookrec.hookrecorder.contains([ ("pytest_collectstart", "collector.fspath == topdir"), ("pytest_make_collect_report", "collector.fspath == topdir"), ("pytest_collectstart", "collector.fspath == p"), ("pytest_make_collect_report", "collector.fspath == p"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.nodeid.startswith(p.basename)"), ("pytest_collectreport", "report.nodeid == '.'") ]) def test_collect_protocol_method(self, testdir): p = testdir.makepyfile(""" class TestClass: def test_method(self): pass """) normid = p.basename + "::TestClass::()::test_method" for id in [p.basename, p.basename + "::TestClass", p.basename + "::TestClass::()", normid, ]: items, hookrec = testdir.inline_genitems(id) assert len(items) == 1 assert items[0].name == "test_method" newid = items[0].nodeid assert newid == normid def test_collect_custom_nodes_multi_id(self, testdir): p = testdir.makepyfile("def test_func(): pass") testdir.makeconftest(""" import pytest class SpecialItem(pytest.Item): def runtest(self): return # ok class SpecialFile(pytest.File): def collect(self): return [SpecialItem(name="check", parent=self)] def pytest_collect_file(path, parent): if path.basename == %r: return SpecialFile(fspath=path, parent=parent) """ % 
p.basename) id = p.basename items, hookrec = testdir.inline_genitems(id) py.std.pprint.pprint(hookrec.hookrecorder.calls) assert len(items) == 2 hookrec.hookrecorder.contains([ ("pytest_collectstart", "collector.fspath == collector.session.fspath"), ("pytest_collectstart", "collector.__class__.__name__ == 'SpecialFile'"), ("pytest_collectstart", "collector.__class__.__name__ == 'Module'"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.nodeid.startswith(p.basename)"), #("pytest_collectreport", # "report.fspath == %r" % str(rcol.fspath)), ]) def test_collect_subdir_event_ordering(self, testdir): p = testdir.makepyfile("def test_func(): pass") aaa = testdir.mkpydir("aaa") test_aaa = aaa.join("test_aaa.py") p.move(test_aaa) items, hookrec = testdir.inline_genitems() assert len(items) == 1 py.std.pprint.pprint(hookrec.hookrecorder.calls) hookrec.hookrecorder.contains([ ("pytest_collectstart", "collector.fspath == test_aaa"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.nodeid.startswith('aaa/test_aaa.py')"), ]) def test_collect_two_commandline_args(self, testdir): p = testdir.makepyfile("def test_func(): pass") aaa = testdir.mkpydir("aaa") bbb = testdir.mkpydir("bbb") test_aaa = aaa.join("test_aaa.py") p.copy(test_aaa) test_bbb = bbb.join("test_bbb.py") p.move(test_bbb) id = "." 
items, hookrec = testdir.inline_genitems(id) assert len(items) == 2 py.std.pprint.pprint(hookrec.hookrecorder.calls) hookrec.hookrecorder.contains([ ("pytest_collectstart", "collector.fspath == test_aaa"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.nodeid == 'aaa/test_aaa.py'"), ("pytest_collectstart", "collector.fspath == test_bbb"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.nodeid == 'bbb/test_bbb.py'"), ]) def test_serialization_byid(self, testdir): testdir.makepyfile("def test_func(): pass") items, hookrec = testdir.inline_genitems() assert len(items) == 1 item, = items items2, hookrec = testdir.inline_genitems(item.nodeid) item2, = items2 assert item2.name == item.name assert item2.fspath == item.fspath def test_find_byid_without_instance_parents(self, testdir): p = testdir.makepyfile(""" class TestClass: def test_method(self): pass """) arg = p.basename + ("::TestClass::test_method") items, hookrec = testdir.inline_genitems(arg) assert len(items) == 1 item, = items assert item.nodeid.endswith("TestClass::()::test_method") class Test_getinitialnodes: def test_global_file(self, testdir, tmpdir): x = tmpdir.ensure("x.py") config = testdir.parseconfigure(x) col = testdir.getnode(config, x) assert isinstance(col, pytest.Module) assert col.name == 'x.py' assert col.parent.name == testdir.tmpdir.basename assert col.parent.parent is None for col in col.listchain(): assert col.config is config def test_pkgfile(self, testdir): testdir.chdir() tmpdir = testdir.tmpdir subdir = tmpdir.join("subdir") x = subdir.ensure("x.py") subdir.ensure("__init__.py") config = testdir.parseconfigure(x) col = testdir.getnode(config, x) assert isinstance(col, pytest.Module) assert col.name == 'subdir/x.py' assert col.parent.parent is None for col in col.listchain(): assert col.config is config class Test_genitems: def test_check_collect_hashes(self, testdir): p = testdir.makepyfile(""" def test_1(): 
pass def test_2(): pass """) p.copy(p.dirpath(p.purebasename + "2" + ".py")) items, reprec = testdir.inline_genitems(p.dirpath()) assert len(items) == 4 for numi, i in enumerate(items): for numj, j in enumerate(items): if numj != numi: assert hash(i) != hash(j) assert i != j def test_example_items1(self, testdir): p = testdir.makepyfile(''' def testone(): pass class TestX: def testmethod_one(self): pass class TestY(TestX): pass ''') items, reprec = testdir.inline_genitems(p) assert len(items) == 3 assert items[0].name == 'testone' assert items[1].name == 'testmethod_one' assert items[2].name == 'testmethod_one' # let's also test getmodpath here assert items[0].getmodpath() == "testone" assert items[1].getmodpath() == "TestX.testmethod_one" assert items[2].getmodpath() == "TestY.testmethod_one" s = items[0].getmodpath(stopatmodule=False) assert s.endswith("test_example_items1.testone") print(s) def test_matchnodes_two_collections_same_file(testdir): testdir.makeconftest(""" import pytest def pytest_configure(config): config.pluginmanager.register(Plugin2()) class Plugin2: def pytest_collect_file(self, path, parent): if path.ext == ".abc": return MyFile2(path, parent) def pytest_collect_file(path, parent): if path.ext == ".abc": return MyFile1(path, parent) class MyFile1(pytest.Item, pytest.File): def runtest(self): pass class MyFile2(pytest.File): def collect(self): return [Item2("hello", parent=self)] class Item2(pytest.Item): def runtest(self): pass """) p = testdir.makefile(".abc", "") result = testdir.runpytest() assert result.ret == 0 result.stdout.fnmatch_lines([ "*2 passed*", ]) res = testdir.runpytest("%s::hello" % p.basename) res.stdout.fnmatch_lines([ "*1 passed*", ]) class TestNodekeywords: def test_no_under(self, testdir): modcol = testdir.getmodulecol(""" def test_pass(): pass def test_fail(): assert 0 """) l = list(modcol.keywords) assert modcol.name in l for x in l: assert not x.startswith("_") assert modcol.name in repr(modcol.keywords) def 
test_issue345(self, testdir): testdir.makepyfile(""" def test_should_not_be_selected(): assert False, 'I should not have been selected to run' def test___repr__(): pass """) reprec = testdir.inline_run("-k repr") reprec.assertoutcome(passed=1, failed=0) pytest-2.5.1/testing/test_runner.py0000664000175000017500000004132412254002202017027 0ustar hpkhpk00000000000000from __future__ import with_statement import pytest, py, sys, os from _pytest import runner, main class TestSetupState: def test_setup(self, testdir): ss = runner.SetupState() item = testdir.getitem("def test_func(): pass") l = [1] ss.prepare(item) ss.addfinalizer(l.pop, colitem=item) assert l ss._pop_and_teardown() assert not l def test_setup_scope_None(self, testdir): item = testdir.getitem("def test_func(): pass") ss = runner.SetupState() l = [1] ss.prepare(item) ss.addfinalizer(l.pop, colitem=None) assert l ss._pop_and_teardown() assert l ss._pop_and_teardown() assert l ss.teardown_all() assert not l def test_teardown_exact_stack_empty(self, testdir): item = testdir.getitem("def test_func(): pass") ss = runner.SetupState() ss.teardown_exact(item, None) ss.teardown_exact(item, None) ss.teardown_exact(item, None) def test_setup_fails_and_failure_is_cached(self, testdir): item = testdir.getitem(""" def setup_module(mod): raise ValueError(42) def test_func(): pass """) # noqa ss = runner.SetupState() pytest.raises(ValueError, lambda: ss.prepare(item)) pytest.raises(ValueError, lambda: ss.prepare(item)) def test_teardown_multiple_one_fails(self, testdir): r = [] def fin1(): r.append('fin1') def fin2(): raise Exception('oops') def fin3(): r.append('fin3') item = testdir.getitem("def test_func(): pass") ss = runner.SetupState() ss.addfinalizer(fin1, item) ss.addfinalizer(fin2, item) ss.addfinalizer(fin3, item) with pytest.raises(Exception) as err: ss._callfinalizers(item) assert err.value.args == ('oops',) assert r == ['fin3', 'fin1'] def test_teardown_multiple_fail(self, testdir): # Ensure the first 
exception is the one which is re-raised. # Ideally both would be reported however. def fin1(): raise Exception('oops1') def fin2(): raise Exception('oops2') item = testdir.getitem("def test_func(): pass") ss = runner.SetupState() ss.addfinalizer(fin1, item) ss.addfinalizer(fin2, item) with pytest.raises(Exception) as err: ss._callfinalizers(item) assert err.value.args == ('oops2',) class BaseFunctionalTests: def test_passfunction(self, testdir): reports = testdir.runitem(""" def test_func(): pass """) rep = reports[1] assert rep.passed assert not rep.failed assert rep.outcome == "passed" assert not rep.longrepr def test_failfunction(self, testdir): reports = testdir.runitem(""" def test_func(): assert 0 """) rep = reports[1] assert not rep.passed assert not rep.skipped assert rep.failed assert rep.when == "call" assert rep.outcome == "failed" #assert isinstance(rep.longrepr, ReprExceptionInfo) def test_skipfunction(self, testdir): reports = testdir.runitem(""" import pytest def test_func(): pytest.skip("hello") """) rep = reports[1] assert not rep.failed assert not rep.passed assert rep.skipped assert rep.outcome == "skipped" #assert rep.skipped.when == "call" #assert rep.skipped.when == "call" #assert rep.skipped == "%sreason == "hello" #assert rep.skipped.location.lineno == 3 #assert rep.skipped.location.path #assert not rep.skipped.failurerepr def test_skip_in_setup_function(self, testdir): reports = testdir.runitem(""" import pytest def setup_function(func): pytest.skip("hello") def test_func(): pass """) print(reports) rep = reports[0] assert not rep.failed assert not rep.passed assert rep.skipped #assert rep.skipped.reason == "hello" #assert rep.skipped.location.lineno == 3 #assert rep.skipped.location.lineno == 3 assert len(reports) == 2 assert reports[1].passed # teardown def test_failure_in_setup_function(self, testdir): reports = testdir.runitem(""" import pytest def setup_function(func): raise ValueError(42) def test_func(): pass """) rep = reports[0] 
assert not rep.skipped assert not rep.passed assert rep.failed assert rep.when == "setup" assert len(reports) == 2 def test_failure_in_teardown_function(self, testdir): reports = testdir.runitem(""" import pytest def teardown_function(func): raise ValueError(42) def test_func(): pass """) print(reports) assert len(reports) == 3 rep = reports[2] assert not rep.skipped assert not rep.passed assert rep.failed assert rep.when == "teardown" #assert rep.longrepr.reprcrash.lineno == 3 #assert rep.longrepr.reprtraceback.reprentries def test_custom_failure_repr(self, testdir): testdir.makepyfile(conftest=""" import pytest class Function(pytest.Function): def repr_failure(self, excinfo): return "hello" """) reports = testdir.runitem(""" import pytest def test_func(): assert 0 """) rep = reports[1] assert not rep.skipped assert not rep.passed assert rep.failed #assert rep.outcome.when == "call" #assert rep.failed.where.lineno == 3 #assert rep.failed.where.path.basename == "test_func.py" #assert rep.failed.failurerepr == "hello" def test_teardown_final_returncode(self, testdir): rec = testdir.inline_runsource(""" def test_func(): pass def teardown_function(func): raise ValueError(42) """) assert rec.ret == 1 def test_exact_teardown_issue90(self, testdir): rec = testdir.inline_runsource(""" import pytest class TestClass: def test_method(self): pass def teardown_class(cls): raise Exception() def test_func(): import sys # on python2 exc_info is keept till a function exits # so we would end up calling test functions while # sys.exc_info would return the indexerror # from guessing the lastitem excinfo = sys.exc_info() import traceback assert excinfo[0] is None, \ traceback.format_exception(*excinfo) def teardown_function(func): raise ValueError(42) """) reps = rec.getreports("pytest_runtest_logreport") print (reps) for i in range(2): assert reps[i].nodeid.endswith("test_method") assert reps[i].passed assert reps[2].when == "teardown" assert reps[2].failed assert len(reps) == 6 for 
i in range(3,5): assert reps[i].nodeid.endswith("test_func") assert reps[i].passed assert reps[5].when == "teardown" assert reps[5].nodeid.endswith("test_func") assert reps[5].failed def test_failure_in_setup_function_ignores_custom_repr(self, testdir): testdir.makepyfile(conftest=""" import pytest class Function(pytest.Function): def repr_failure(self, excinfo): assert 0 """) reports = testdir.runitem(""" def setup_function(func): raise ValueError(42) def test_func(): pass """) assert len(reports) == 2 rep = reports[0] print(rep) assert not rep.skipped assert not rep.passed assert rep.failed #assert rep.outcome.when == "setup" #assert rep.outcome.where.lineno == 3 #assert rep.outcome.where.path.basename == "test_func.py" #assert instanace(rep.failed.failurerepr, PythonFailureRepr) def test_systemexit_does_not_bail_out(self, testdir): try: reports = testdir.runitem(""" def test_func(): raise SystemExit(42) """) except SystemExit: py.test.fail("runner did not catch SystemExit") rep = reports[1] assert rep.failed assert rep.when == "call" def test_exit_propagates(self, testdir): try: testdir.runitem(""" import pytest def test_func(): raise pytest.exit.Exception() """) except py.test.exit.Exception: pass else: py.test.fail("did not raise") class TestExecutionNonForked(BaseFunctionalTests): def getrunner(self): def f(item): return runner.runtestprotocol(item, log=False) return f def test_keyboardinterrupt_propagates(self, testdir): try: testdir.runitem(""" def test_func(): raise KeyboardInterrupt("fake") """) except KeyboardInterrupt: pass else: py.test.fail("did not raise") class TestExecutionForked(BaseFunctionalTests): pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')") def getrunner(self): # XXX re-arrange this test to live in pytest-xdist xplugin = py.test.importorskip("xdist.plugin") return xplugin.forked_run_report def test_suicide(self, testdir): reports = testdir.runitem(""" def test_func(): import os os.kill(os.getpid(), 15) """) rep = reports[0] 
assert rep.failed assert rep.when == "???" class TestSessionReports: def test_collect_result(self, testdir): col = testdir.getmodulecol(""" def test_func1(): pass class TestClass: pass """) rep = runner.collect_one_node(col) assert not rep.failed assert not rep.skipped assert rep.passed locinfo = rep.location assert locinfo[0] == col.fspath.basename assert not locinfo[1] assert locinfo[2] == col.fspath.basename res = rep.result assert len(res) == 2 assert res[0].name == "test_func1" assert res[1].name == "TestClass" def test_skip_at_module_scope(self, testdir): col = testdir.getmodulecol(""" import pytest pytest.skip("hello") def test_func(): pass """) rep = main.collect_one_node(col) assert not rep.failed assert not rep.passed assert rep.skipped reporttypes = [ runner.BaseReport, runner.TestReport, runner.TeardownErrorReport, runner.CollectReport, ] @pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes]) def test_report_extra_parameters(reporttype): args = py.std.inspect.getargspec(reporttype.__init__)[0][1:] basekw = dict.fromkeys(args, []) report = reporttype(newthing=1, **basekw) assert report.newthing == 1 def test_callinfo(): ci = runner.CallInfo(lambda: 0, '123') assert ci.when == "123" assert ci.result == 0 assert "result" in repr(ci) ci = runner.CallInfo(lambda: 0/0, '123') assert ci.when == "123" assert not hasattr(ci, 'result') assert ci.excinfo assert "exc" in repr(ci) # design question: do we want general hooks in python files? # then something like the following functional tests makes sense @pytest.mark.xfail def test_runtest_in_module_ordering(testdir): p1 = testdir.makepyfile(""" def pytest_runtest_setup(item): # runs after class-level! 
item.function.mylist.append("module") class TestClass: def pytest_runtest_setup(self, item): assert not hasattr(item.function, 'mylist') item.function.mylist = ['class'] def pytest_funcarg__mylist(self, request): return request.function.mylist def pytest_runtest_call(self, item, __multicall__): try: __multicall__.execute() except ValueError: pass def test_hello1(self, mylist): assert mylist == ['class', 'module'], mylist raise ValueError() def test_hello2(self, mylist): assert mylist == ['class', 'module'], mylist def pytest_runtest_teardown(item): del item.function.mylist """) result = testdir.runpytest(p1) result.stdout.fnmatch_lines([ "*2 passed*" ]) def test_outcomeexception_exceptionattributes(): outcome = runner.OutcomeException('test') assert outcome.args[0] == outcome.msg def test_pytest_exit(): try: py.test.exit("hello") except py.test.exit.Exception: excinfo = py.code.ExceptionInfo() assert excinfo.errisinstance(KeyboardInterrupt) def test_pytest_fail(): try: py.test.fail("hello") except py.test.fail.Exception: excinfo = py.code.ExceptionInfo() s = excinfo.exconly(tryshort=True) assert s.startswith("Failed") def test_pytest_fail_notrace(testdir): testdir.makepyfile(""" import pytest def test_hello(): pytest.fail("hello", pytrace=False) def teardown_function(function): pytest.fail("world", pytrace=False) """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "world", "hello", ]) assert 'def teardown_function' not in result.stdout.str() def test_exception_printing_skip(): try: pytest.skip("hello") except pytest.skip.Exception: excinfo = py.code.ExceptionInfo() s = excinfo.exconly(tryshort=True) assert s.startswith("Skipped") def test_importorskip(): importorskip = py.test.importorskip def f(): importorskip("asdlkj") try: sys = importorskip("sys") # noqa assert sys == py.std.sys #path = py.test.importorskip("os.path") #assert path == py.std.os.path excinfo = pytest.raises(pytest.skip.Exception, f) path = 
py.path.local(excinfo.getrepr().reprcrash.path) # check that importorskip reports the actual call # in this test the test_runner.py file assert path.purebasename == "test_runner" pytest.raises(SyntaxError, "py.test.importorskip('x y z')") pytest.raises(SyntaxError, "py.test.importorskip('x=y')") mod = py.std.types.ModuleType("hello123") mod.__version__ = "1.3" sys.modules["hello123"] = mod pytest.raises(pytest.skip.Exception, """ py.test.importorskip("hello123", minversion="1.3.1") """) mod2 = pytest.importorskip("hello123", minversion="1.3") assert mod2 == mod except pytest.skip.Exception: print(py.code.ExceptionInfo()) py.test.fail("spurious skip") def test_importorskip_imports_last_module_part(): ospath = py.test.importorskip("os.path") assert os.path == ospath def test_pytest_cmdline_main(testdir): p = testdir.makepyfile(""" import py def test_hello(): assert 1 if __name__ == '__main__': py.test.cmdline.main([__file__]) """) import subprocess popen = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE) popen.communicate() ret = popen.wait() assert ret == 0 def test_unicode_in_longrepr(testdir): testdir.makeconftest(""" import py def pytest_runtest_makereport(__multicall__): rep = __multicall__.execute() if rep.when == "call": rep.longrepr = py.builtin._totext("\\xc3\\xa4", "utf8") return rep """) testdir.makepyfile(""" def test_out(): assert 0 """) result = testdir.runpytest() assert result.ret == 1 assert "UnicodeEncodeError" not in result.stderr.str() def test_failure_in_setup(testdir): testdir.makepyfile(""" def setup_module(): 0/0 def test_func(): pass """) result = testdir.runpytest("--tb=line") assert "def setup_module" not in result.stdout.str() pytest-2.5.1/testing/test_capture.py0000664000175000017500000003733112254002202017164 0ustar hpkhpk00000000000000import pytest, py, os, sys from _pytest.capture import CaptureManager needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')") class TestCaptureManager: def 
test_getmethod_default_no_fd(self, testdir, monkeypatch): config = testdir.parseconfig(testdir.tmpdir) assert config.getvalue("capture") is None capman = CaptureManager() monkeypatch.delattr(os, 'dup', raising=False) try: assert capman._getmethod(config, None) == "sys" finally: monkeypatch.undo() @pytest.mark.parametrize("mode", "no fd sys".split()) def test_configure_per_fspath(self, testdir, mode): config = testdir.parseconfig(testdir.tmpdir) capman = CaptureManager() hasfd = hasattr(os, 'dup') if hasfd: assert capman._getmethod(config, None) == "fd" else: assert capman._getmethod(config, None) == "sys" if not hasfd and mode == 'fd': return sub = testdir.tmpdir.mkdir("dir" + mode) sub.ensure("__init__.py") sub.join("conftest.py").write('option_capture = %r' % mode) assert capman._getmethod(config, sub.join("test_hello.py")) == mode @needsosdup @pytest.mark.parametrize("method", ['no', 'fd', 'sys']) def test_capturing_basic_api(self, method): capouter = py.io.StdCaptureFD() old = sys.stdout, sys.stderr, sys.stdin try: capman = CaptureManager() # call suspend without resume or start outerr = capman.suspendcapture() outerr = capman.suspendcapture() assert outerr == ("", "") capman.resumecapture(method) print ("hello") out, err = capman.suspendcapture() if method == "no": assert old == (sys.stdout, sys.stderr, sys.stdin) else: assert out == "hello\n" capman.resumecapture(method) out, err = capman.suspendcapture() assert not out and not err capman.reset_capturings() finally: capouter.reset() @needsosdup def test_juggle_capturings(self, testdir): capouter = py.io.StdCaptureFD() try: #config = testdir.parseconfig(testdir.tmpdir) capman = CaptureManager() try: capman.resumecapture("fd") pytest.raises(ValueError, 'capman.resumecapture("fd")') pytest.raises(ValueError, 'capman.resumecapture("sys")') os.write(1, "hello\n".encode('ascii')) out, err = capman.suspendcapture() assert out == "hello\n" capman.resumecapture("sys") os.write(1, "hello\n".encode('ascii')) 
py.builtin.print_("world", file=sys.stderr) out, err = capman.suspendcapture() assert not out assert err == "world\n" finally: capman.reset_capturings() finally: capouter.reset() @pytest.mark.xfail("hasattr(sys, 'pypy_version_info')") @pytest.mark.parametrize("method", ['fd', 'sys']) def test_capturing_unicode(testdir, method): if sys.version_info >= (3,0): obj = "'b\u00f6y'" else: obj = "u'\u00f6y'" testdir.makepyfile(""" # coding=utf8 # taken from issue 227 from nosetests def test_unicode(): import sys print (sys.stdout) print (%s) """ % obj) result = testdir.runpytest("--capture=%s" % method) result.stdout.fnmatch_lines([ "*1 passed*" ]) @pytest.mark.parametrize("method", ['fd', 'sys']) def test_capturing_bytes_in_utf8_encoding(testdir, method): testdir.makepyfile(""" def test_unicode(): print ('b\\u00f6y') """) result = testdir.runpytest("--capture=%s" % method) result.stdout.fnmatch_lines([ "*1 passed*" ]) def test_collect_capturing(testdir): p = testdir.makepyfile(""" print ("collect %s failure" % 13) import xyz42123 """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*Captured stdout*", "*collect 13 failure*", ]) class TestPerTestCapturing: def test_capture_and_fixtures(self, testdir): p = testdir.makepyfile(""" def setup_module(mod): print ("setup module") def setup_function(function): print ("setup " + function.__name__) def test_func1(): print ("in func1") assert 0 def test_func2(): print ("in func2") assert 0 """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "setup module*", "setup test_func1*", "in func1*", "setup test_func2*", "in func2*", ]) @pytest.mark.xfail def test_capture_scope_cache(self, testdir): p = testdir.makepyfile(""" import sys def setup_module(func): print ("module-setup") def setup_function(func): print ("function-setup") def test_func(): print ("in function") assert 0 def teardown_function(func): print ("in teardown") """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*test_func():*", 
"*Captured stdout during setup*", "module-setup*", "function-setup*", "*Captured stdout*", "in teardown*", ]) def test_no_carry_over(self, testdir): p = testdir.makepyfile(""" def test_func1(): print ("in func1") def test_func2(): print ("in func2") assert 0 """) result = testdir.runpytest(p) s = result.stdout.str() assert "in func1" not in s assert "in func2" in s def test_teardown_capturing(self, testdir): p = testdir.makepyfile(""" def setup_function(function): print ("setup func1") def teardown_function(function): print ("teardown func1") assert 0 def test_func1(): print ("in func1") pass """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ '*teardown_function*', '*Captured stdout*', "setup func1*", "in func1*", "teardown func1*", #"*1 fixture failure*" ]) def test_teardown_capturing_final(self, testdir): p = testdir.makepyfile(""" def teardown_module(mod): print ("teardown module") assert 0 def test_func(): pass """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*def teardown_module(mod):*", "*Captured stdout*", "*teardown module*", "*1 error*", ]) def test_capturing_outerr(self, testdir): p1 = testdir.makepyfile(""" import sys def test_capturing(): print (42) sys.stderr.write(str(23)) def test_capturing_error(): print (1) sys.stderr.write(str(2)) raise ValueError """) result = testdir.runpytest(p1) result.stdout.fnmatch_lines([ "*test_capturing_outerr.py .F", "====* FAILURES *====", "____*____", "*test_capturing_outerr.py:8: ValueError", "*--- Captured stdout ---*", "1", "*--- Captured stderr ---*", "2", ]) class TestLoggingInteraction: def test_logging_stream_ownership(self, testdir): p = testdir.makepyfile(""" def test_logging(): import logging import pytest stream = py.io.TextIO() logging.basicConfig(stream=stream) stream.close() # to free memory/release resources """) result = testdir.runpytest(p) result.stderr.str().find("atexit") == -1 def test_logging_and_immediate_setupteardown(self, testdir): p = testdir.makepyfile(""" 
import logging def setup_function(function): logging.warn("hello1") def test_logging(): logging.warn("hello2") assert 0 def teardown_function(function): logging.warn("hello3") assert 0 """) for optargs in (('--capture=sys',), ('--capture=fd',)): print (optargs) result = testdir.runpytest(p, *optargs) s = result.stdout.str() result.stdout.fnmatch_lines([ "*WARN*hello3", # errors show first! "*WARN*hello1", "*WARN*hello2", ]) # verify proper termination assert "closed" not in s def test_logging_and_crossscope_fixtures(self, testdir): p = testdir.makepyfile(""" import logging def setup_module(function): logging.warn("hello1") def test_logging(): logging.warn("hello2") assert 0 def teardown_module(function): logging.warn("hello3") assert 0 """) for optargs in (('--capture=sys',), ('--capture=fd',)): print (optargs) result = testdir.runpytest(p, *optargs) s = result.stdout.str() result.stdout.fnmatch_lines([ "*WARN*hello3", # errors come first "*WARN*hello1", "*WARN*hello2", ]) # verify proper termination assert "closed" not in s def test_logging_initialized_in_test(self, testdir): p = testdir.makepyfile(""" import sys def test_something(): # pytest does not import logging assert 'logging' not in sys.modules import logging logging.basicConfig() logging.warn("hello432") assert 0 """) result = testdir.runpytest(p, "--traceconfig", "-p", "no:capturelog") assert result.ret != 0 result.stdout.fnmatch_lines([ "*hello432*", ]) assert 'operation on closed file' not in result.stderr.str() def test_conftestlogging_is_shown(self, testdir): testdir.makeconftest(""" import logging logging.basicConfig() logging.warn("hello435") """) # make sure that logging is still captured in tests result = testdir.runpytest("-s", "-p", "no:capturelog") assert result.ret == 0 result.stderr.fnmatch_lines([ "WARNING*hello435*", ]) assert 'operation on closed file' not in result.stderr.str() def test_conftestlogging_and_test_logging(self, testdir): testdir.makeconftest(""" import logging 
logging.basicConfig() """) # make sure that logging is still captured in tests p = testdir.makepyfile(""" def test_hello(): import logging logging.warn("hello433") assert 0 """) result = testdir.runpytest(p, "-p", "no:capturelog") assert result.ret != 0 result.stdout.fnmatch_lines([ "WARNING*hello433*", ]) assert 'something' not in result.stderr.str() assert 'operation on closed file' not in result.stderr.str() class TestCaptureFixture: def test_std_functional(self, testdir): reprec = testdir.inline_runsource(""" def test_hello(capsys): print (42) out, err = capsys.readouterr() assert out.startswith("42") """) reprec.assertoutcome(passed=1) def test_capsyscapfd(self, testdir): p = testdir.makepyfile(""" def test_one(capsys, capfd): pass def test_two(capfd, capsys): pass """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*ERROR*setup*test_one*", "*capsys*capfd*same*time*", "*ERROR*setup*test_two*", "*capsys*capfd*same*time*", "*2 error*"]) @pytest.mark.parametrize("method", ["sys", "fd"]) def test_capture_is_represented_on_failure_issue128(self, testdir, method): p = testdir.makepyfile(""" def test_hello(cap%s): print ("xxx42xxx") assert 0 """ % method) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "xxx42xxx", ]) @needsosdup def test_stdfd_functional(self, testdir): reprec = testdir.inline_runsource(""" def test_hello(capfd): import os os.write(1, "42".encode('ascii')) out, err = capfd.readouterr() assert out.startswith("42") capfd.close() """) reprec.assertoutcome(passed=1) def test_partial_setup_failure(self, testdir): p = testdir.makepyfile(""" def test_hello(capsys, missingarg): pass """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*test_partial_setup_failure*", "*1 error*", ]) @needsosdup def test_keyboardinterrupt_disables_capturing(self, testdir): p = testdir.makepyfile(""" def test_hello(capfd): import os os.write(1, str(42).encode('ascii')) raise KeyboardInterrupt() """) result = testdir.runpytest(p) 
result.stdout.fnmatch_lines([ "*KeyboardInterrupt*" ]) assert result.ret == 2 @pytest.mark.issue14 def test_capture_and_logging(self, testdir): p = testdir.makepyfile(""" import logging def test_log(capsys): logging.error('x') """) result = testdir.runpytest(p) assert 'closed' not in result.stderr.str() def test_setup_failure_does_not_kill_capturing(testdir): sub1 = testdir.mkpydir("sub1") sub1.join("conftest.py").write(py.code.Source(""" def pytest_runtest_setup(item): raise ValueError(42) """)) sub1.join("test_mod.py").write("def test_func1(): pass") result = testdir.runpytest(testdir.tmpdir, '--traceconfig') result.stdout.fnmatch_lines([ "*ValueError(42)*", "*1 error*" ]) def test_fdfuncarg_skips_on_no_osdup(testdir): testdir.makepyfile(""" import os if hasattr(os, 'dup'): del os.dup def test_hello(capfd): pass """) result = testdir.runpytest("--capture=no") result.stdout.fnmatch_lines([ "*1 skipped*" ]) def test_capture_conftest_runtest_setup(testdir): testdir.makeconftest(""" def pytest_runtest_setup(): print ("hello19") """) testdir.makepyfile("def test_func(): pass") result = testdir.runpytest() assert result.ret == 0 assert 'hello19' not in result.stdout.str() def test_capture_early_option_parsing(testdir): testdir.makeconftest(""" def pytest_runtest_setup(): print ("hello19") """) testdir.makepyfile("def test_func(): pass") result = testdir.runpytest("-vs") assert result.ret == 0 assert 'hello19' in result.stdout.str() pytest-2.5.1/testing/test_recwarn.py0000664000175000017500000000472312254002202017161 0ustar hpkhpk00000000000000import py, pytest from _pytest.recwarn import WarningsRecorder def test_WarningRecorder(recwarn): showwarning = py.std.warnings.showwarning rec = WarningsRecorder() assert py.std.warnings.showwarning != showwarning assert not rec.list py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13) assert len(rec.list) == 1 py.std.warnings.warn(DeprecationWarning("hello")) assert len(rec.list) == 2 warn = rec.pop() assert 
str(warn.message) == "hello" l = rec.list rec.clear() assert len(rec.list) == 0 assert l is rec.list pytest.raises(AssertionError, "rec.pop()") rec.finalize() assert showwarning == py.std.warnings.showwarning def test_recwarn_functional(testdir): reprec = testdir.inline_runsource(""" pytest_plugins = 'pytest_recwarn', import warnings oldwarn = warnings.showwarning def test_method(recwarn): assert warnings.showwarning != oldwarn warnings.warn("hello") warn = recwarn.pop() assert isinstance(warn.message, UserWarning) def test_finalized(): assert warnings.showwarning == oldwarn """) res = reprec.countoutcomes() assert tuple(res) == (2, 0, 0), res # # ============ test py.test.deprecated_call() ============== # def dep(i): if i == 0: py.std.warnings.warn("is deprecated", DeprecationWarning) return 42 reg = {} def dep_explicit(i): if i == 0: py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning, filename="hello", lineno=3) def test_deprecated_call_raises(): excinfo = pytest.raises(AssertionError, "py.test.deprecated_call(dep, 3)") assert str(excinfo).find("did not produce") != -1 def test_deprecated_call(): py.test.deprecated_call(dep, 0) def test_deprecated_call_ret(): ret = py.test.deprecated_call(dep, 0) assert ret == 42 def test_deprecated_call_preserves(): r = py.std.warnings.onceregistry.copy() f = py.std.warnings.filters[:] test_deprecated_call_raises() test_deprecated_call() assert r == py.std.warnings.onceregistry assert f == py.std.warnings.filters def test_deprecated_explicit_call_raises(): pytest.raises(AssertionError, "py.test.deprecated_call(dep_explicit, 3)") def test_deprecated_explicit_call(): py.test.deprecated_call(dep_explicit, 0) py.test.deprecated_call(dep_explicit, 0) pytest-2.5.1/testing/test_runner_xunit.py0000664000175000017500000001573512254002202020265 0ustar hpkhpk00000000000000# # test correct setup/teardowns at # module, class, and instance level def test_module_and_function_setup(testdir): reprec = 
testdir.inline_runsource(""" modlevel = [] def setup_module(module): assert not modlevel module.modlevel.append(42) def teardown_module(module): modlevel.pop() def setup_function(function): function.answer = 17 def teardown_function(function): del function.answer def test_modlevel(): assert modlevel[0] == 42 assert test_modlevel.answer == 17 class TestFromClass: def test_module(self): assert modlevel[0] == 42 assert not hasattr(test_modlevel, 'answer') """) rep = reprec.matchreport("test_modlevel") assert rep.passed rep = reprec.matchreport("test_module") assert rep.passed def test_module_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" l = [] def setup_module(module): l.append(1) 0/0 def test_nothing(): pass def teardown_module(module): l.append(2) """) reprec.assertoutcome(failed=1) calls = reprec.getcalls("pytest_runtest_setup") assert calls[0].item.module.l == [1] def test_setup_function_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" modlevel = [] def setup_function(function): modlevel.append(1) 0/0 def teardown_function(module): modlevel.append(2) def test_func(): pass """) calls = reprec.getcalls("pytest_runtest_setup") assert calls[0].item.module.modlevel == [1] def test_class_setup(testdir): reprec = testdir.inline_runsource(""" class TestSimpleClassSetup: clslevel = [] def setup_class(cls): cls.clslevel.append(23) def teardown_class(cls): cls.clslevel.pop() def test_classlevel(self): assert self.clslevel[0] == 23 class TestInheritedClassSetupStillWorks(TestSimpleClassSetup): def test_classlevel_anothertime(self): assert self.clslevel == [23] def test_cleanup(): assert not TestSimpleClassSetup.clslevel assert not TestInheritedClassSetupStillWorks.clslevel """) reprec.assertoutcome(passed=1+2+1) def test_class_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" class TestSimpleClassSetup: clslevel = [] def setup_class(cls): 0/0 def teardown_class(cls): cls.clslevel.append(1) def 
test_classlevel(self): pass def test_cleanup(): assert not TestSimpleClassSetup.clslevel """) reprec.assertoutcome(failed=1, passed=1) def test_method_setup(testdir): reprec = testdir.inline_runsource(""" class TestSetupMethod: def setup_method(self, meth): self.methsetup = meth def teardown_method(self, meth): del self.methsetup def test_some(self): assert self.methsetup == self.test_some def test_other(self): assert self.methsetup == self.test_other """) reprec.assertoutcome(passed=2) def test_method_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" class TestMethodSetup: clslevel = [] def setup_method(self, method): self.clslevel.append(1) 0/0 def teardown_method(self, method): self.clslevel.append(2) def test_method(self): pass def test_cleanup(): assert TestMethodSetup.clslevel == [1] """) reprec.assertoutcome(failed=1, passed=1) def test_method_generator_setup(testdir): reprec = testdir.inline_runsource(""" class TestSetupTeardownOnInstance: def setup_class(cls): cls.classsetup = True def setup_method(self, method): self.methsetup = method def test_generate(self): assert self.classsetup assert self.methsetup == self.test_generate yield self.generated, 5 yield self.generated, 2 def generated(self, value): assert self.classsetup assert self.methsetup == self.test_generate assert value == 5 """) reprec.assertoutcome(passed=1, failed=1) def test_func_generator_setup(testdir): reprec = testdir.inline_runsource(""" import sys def setup_module(mod): print ("setup_module") mod.x = [] def setup_function(fun): print ("setup_function") x.append(1) def teardown_function(fun): print ("teardown_function") x.pop() def test_one(): assert x == [1] def check(): print ("check") sys.stderr.write("e\\n") assert x == [1] yield check assert x == [1] """) rep = reprec.matchreport("test_one", names="pytest_runtest_logreport") assert rep.passed def test_method_setup_uses_fresh_instances(testdir): reprec = testdir.inline_runsource(""" class TestSelfState1: 
memory = [] def test_hello(self): self.memory.append(self) def test_afterhello(self): assert self != self.memory[0] """) reprec.assertoutcome(passed=2, failed=0) def test_setup_that_skips_calledagain(testdir): p = testdir.makepyfile(""" import pytest def setup_module(mod): pytest.skip("x") def test_function1(): pass def test_function2(): pass """) reprec = testdir.inline_run(p) reprec.assertoutcome(skipped=2) def test_setup_fails_again_on_all_tests(testdir): p = testdir.makepyfile(""" import pytest def setup_module(mod): raise ValueError(42) def test_function1(): pass def test_function2(): pass """) reprec = testdir.inline_run(p) reprec.assertoutcome(failed=2) def test_setup_funcarg_setup_when_outer_scope_fails(testdir): p = testdir.makepyfile(""" import pytest def setup_module(mod): raise ValueError(42) def pytest_funcarg__hello(request): raise ValueError("xyz43") def test_function1(hello): pass def test_function2(hello): pass """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*function1*", "*ValueError*42*", "*function2*", "*ValueError*42*", "*2 error*" ]) assert "xyz43" not in result.stdout.str() pytest-2.5.1/testing/test_assertinterpret.py0000664000175000017500000002074712254002202020762 0ustar hpkhpk00000000000000"PYTEST_DONT_REWRITE" import pytest, py from _pytest.assertion import util def exvalue(): return py.std.sys.exc_info()[1] def f(): return 2 def test_not_being_rewritten(): assert "@py_builtins" not in globals() def test_assert(): try: assert f() == 3 except AssertionError: e = exvalue() s = str(e) assert s.startswith('assert 2 == 3\n') def test_assert_with_explicit_message(): try: assert f() == 3, "hello" except AssertionError: e = exvalue() assert e.msg == 'hello' def test_assert_within_finally(): excinfo = py.test.raises(ZeroDivisionError, """ try: 1/0 finally: i = 42 """) s = excinfo.exconly() assert py.std.re.search("division.+by zero", s) is not None #def g(): # A.f() #excinfo = getexcinfo(TypeError, g) #msg = getmsg(excinfo) 
#assert msg.find("must be called with A") != -1 def test_assert_multiline_1(): try: assert (f() == 3) except AssertionError: e = exvalue() s = str(e) assert s.startswith('assert 2 == 3\n') def test_assert_multiline_2(): try: assert (f() == (4, 3)[-1]) except AssertionError: e = exvalue() s = str(e) assert s.startswith('assert 2 ==') def test_in(): try: assert "hi" in [1, 2] except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 'hi' in") def test_is(): try: assert 1 is 2 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 1 is 2") @py.test.mark.skipif("sys.version_info < (2,6)") def test_attrib(): class Foo(object): b = 1 i = Foo() try: assert i.b == 2 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 1 == 2") @py.test.mark.skipif("sys.version_info < (2,6)") def test_attrib_inst(): class Foo(object): b = 1 try: assert Foo().b == 2 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 1 == 2") def test_len(): l = list(range(42)) try: assert len(l) == 100 except AssertionError: e = exvalue() s = str(e) assert s.startswith("assert 42 == 100") assert "where 42 = len([" in s def test_assert_non_string_message(): class A: def __str__(self): return "hello" try: assert 0 == 1, A() except AssertionError: e = exvalue() assert e.msg == "hello" def test_assert_keyword_arg(): def f(x=3): return False try: assert f(x=5) except AssertionError: e = exvalue() assert "x=5" in e.msg # These tests should both fail, but should fail nicely... 
class WeirdRepr:
    # Helper with a custom __repr__ used by bug_test_assert_repr below.
    # NOTE(review): the test below expects both 'WeirdRepr' and 'second line'
    # to appear in the failure message, but this repr returns '' — it looks
    # like the original multi-line repr text was lost in this copy; confirm
    # against the upstream file before relying on it.
    def __repr__(self):
        return ''

def bug_test_assert_repr():
    # Named bug_* (not test_*) so pytest does not collect it: documents a
    # known-broken case where a multi-line repr should surface in the message.
    v = WeirdRepr()
    try:
        assert v == 1
    except AssertionError:
        e = exvalue()
        assert e.msg.find('WeirdRepr') != -1
        assert e.msg.find('second line') != -1
        assert 0

def test_assert_non_string():
    # Non-string assert message (a list) must still be rendered into e.msg.
    try:
        assert 0, ['list']
    except AssertionError:
        e = exvalue()
        assert e.msg.find("list") != -1

def test_assert_implicit_multiline():
    # The repr of the left-hand list appears in the interpreted message.
    try:
        x = [1,2,3]
        assert x != [1, 2, 3]
    except AssertionError:
        e = exvalue()
        assert e.msg.find('assert [1, 2, 3] !=') != -1

def test_assert_with_brokenrepr_arg():
    # A message object whose __repr__ raises must be reported gracefully,
    # not crash message construction.
    class BrokenRepr:
        def __repr__(self):
            0 / 0
    e = AssertionError(BrokenRepr())
    if e.msg.find("broken __repr__") == -1:
        py.test.fail("broken __repr__ not handle correctly")

def test_multiple_statements_per_line():
    # Assert sharing a physical line with another statement (`a = 1; assert`)
    # must still be re-interpreted correctly.
    try:
        a = 1; assert a == 2
    except AssertionError:
        e = exvalue()
        assert "assert 1 == 2" in e.msg

def test_power():
    # Exponentiation is rendered with explicit parentheses in the message.
    try:
        assert 2**3 == 7
    except AssertionError:
        e = exvalue()
        assert "assert (2 ** 3) == 7" in e.msg

class TestView:
    # Tests for the View dispatch helper from the old assertion interpreter;
    # skipped entirely when _pytest.assertion.oldinterpret is unavailable.

    def setup_class(cls):
        cls.View = pytest.importorskip("_pytest.assertion.oldinterpret").View

    def test_class_dispatch(self):
        ### Use a custom class hierarchy with existing instances
        class Picklable(self.View):
            pass

        class Simple(Picklable):
            # Fallback view: dispatches on the wrapped object's class.
            __view__ = object
            def pickle(self):
                return repr(self.__obj__)

        class Seq(Picklable):
            # More specific view for sequence/mapping classes.
            __view__ = list, tuple, dict
            def pickle(self):
                return ';'.join(
                    [Picklable(item).pickle() for item in self.__obj__])

        class Dict(Seq):
            # Most specific view: dict only; extends Seq's output.
            __view__ = dict
            def pickle(self):
                return Seq.pickle(self) + '!' + Seq(self.values()).pickle()

        assert Picklable(123).pickle() == '123'
        assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
        assert Picklable({1:2}).pickle() == '1!2'

    def test_viewtype_class_hierarchy(self):
        # Use a custom class hierarchy based on attributes of existing instances
        # (continues on the following chunk of this archive dump)
        class Operation:
            "Existing class that I don't want to change."
def __init__(self, opname, *args): self.opname = opname self.args = args existing = [Operation('+', 4, 5), Operation('getitem', '', 'join'), Operation('setattr', 'x', 'y', 3), Operation('-', 12, 1)] class PyOp(self.View): def __viewkey__(self): return self.opname def generate(self): return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args))) class PyBinaryOp(PyOp): __view__ = ('+', '-', '*', '/') def generate(self): return '%s %s %s' % (self.args[0], self.opname, self.args[1]) codelines = [PyOp(op).generate() for op in existing] assert codelines == ["4 + 5", "getitem('', 'join')", "setattr('x', 'y', 3)", "12 - 1"] @py.test.mark.skipif("sys.version_info < (2,6)") def test_assert_customizable_reprcompare(monkeypatch): monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello') try: assert 3 == 4 except AssertionError: e = exvalue() s = str(e) assert "hello" in s def test_assert_long_source_1(): try: assert len == [ (None, ['somet text', 'more text']), ] except AssertionError: e = exvalue() s = str(e) assert 're-run' not in s assert 'somet text' in s def test_assert_long_source_2(): try: assert(len == [ (None, ['somet text', 'more text']), ]) except AssertionError: e = exvalue() s = str(e) assert 're-run' not in s assert 'somet text' in s def test_assert_raise_alias(testdir): testdir.makepyfile(""" "PYTEST_DONT_REWRITE" import sys EX = AssertionError def test_hello(): raise EX("hello" "multi" "line") """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*def test_hello*", "*raise EX*", "*1 failed*", ]) @pytest.mark.skipif("sys.version_info < (2,5)") def test_assert_raise_subclass(): class SomeEx(AssertionError): def __init__(self, *args): super(SomeEx, self).__init__() try: raise SomeEx("hello") except AssertionError: s = str(exvalue()) assert 're-run' not in s assert 'could not determine' in s def test_assert_raises_in_nonzero_of_object_pytest_issue10(): class A(object): def __nonzero__(self): raise ValueError(42) def __lt__(self, other): return 
A() def __repr__(self): return "" def myany(x): return True try: assert not(myany(A() < 0)) except AssertionError: e = exvalue() s = str(e) assert " < 0" in s @py.test.mark.skipif("sys.version_info >= (2,6)") def test_oldinterpret_importation(): # we had a cyclic import there # requires pytest on sys.path res = py.std.subprocess.call([ py.std.sys.executable, '-c', str(py.code.Source(""" try: from _pytest.assertion.newinterpret import interpret except ImportError: from _pytest.assertion.oldinterpret import interpret """)) ]) assert res == 0 pytest-2.5.1/testing/test_assertion.py0000664000175000017500000002701012254002202017521 0ustar hpkhpk00000000000000# -*- coding: utf-8 -*- import sys import py, pytest import _pytest.assertion as plugin from _pytest.assertion import reinterpret needsnewassert = pytest.mark.skipif("sys.version_info < (2,6)") @pytest.fixture def mock_config(): class Config(object): verbose = False def getoption(self, name): if name == 'verbose': return self.verbose raise KeyError('Not mocked out: %s' % name) return Config() def interpret(expr): return reinterpret.reinterpret(expr, py.code.Frame(sys._getframe(1))) class TestBinReprIntegration: pytestmark = needsnewassert def test_pytest_assertrepr_compare_called(self, testdir): testdir.makeconftest(""" l = [] def pytest_assertrepr_compare(op, left, right): l.append((op, left, right)) def pytest_funcarg__l(request): return l """) testdir.makepyfile(""" def test_hello(): assert 0 == 1 def test_check(l): assert l == [("==", 0, 1)] """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines([ "*test_hello*FAIL*", "*test_check*PASS*", ]) def callequal(left, right, verbose=False): config = mock_config() config.verbose = verbose return plugin.pytest_assertrepr_compare(config, '==', left, right) class TestAssert_reprcompare: def test_different_types(self): assert callequal([0, 1], 'foo') is None def test_summary(self): summary = callequal([0, 1], [0, 2])[0] assert len(summary) < 65 def 
test_text_diff(self): diff = callequal('spam', 'eggs')[1:] assert '- spam' in diff assert '+ eggs' in diff def test_text_skipping(self): lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs') assert 'Skipping' in lines[1] for line in lines: assert 'a'*50 not in line def test_text_skipping_verbose(self): lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs', verbose=True) assert '- ' + 'a'*50 + 'spam' in lines assert '+ ' + 'a'*50 + 'eggs' in lines def test_multiline_text_diff(self): left = 'foo\nspam\nbar' right = 'foo\neggs\nbar' diff = callequal(left, right) assert '- spam' in diff assert '+ eggs' in diff def test_list(self): expl = callequal([0, 1], [0, 2]) assert len(expl) > 1 def test_list_different_lenghts(self): expl = callequal([0, 1], [0, 1, 2]) assert len(expl) > 1 expl = callequal([0, 1, 2], [0, 1]) assert len(expl) > 1 def test_dict(self): expl = callequal({'a': 0}, {'a': 1}) assert len(expl) > 1 def test_dict_omitting(self): lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}) assert lines[1].startswith('Omitting 1 identical item') assert 'Common items' not in lines for line in lines[1:]: assert 'b' not in line def test_dict_omitting_verbose(self): lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=True) assert lines[1].startswith('Common items:') assert 'Omitting' not in lines[1] assert lines[2] == "{'b': 1}" def test_set(self): expl = callequal(set([0, 1]), set([0, 2])) assert len(expl) > 1 def test_frozenzet(self): expl = callequal(frozenset([0, 1]), set([0, 2])) assert len(expl) > 1 def test_Sequence(self): col = py.builtin._tryimport( "collections.abc", "collections", "sys") if not hasattr(col, "MutableSequence"): pytest.skip("cannot import MutableSequence") MutableSequence = col.MutableSequence class TestSequence(MutableSequence): # works with a Sequence subclass def __init__(self, iterable): self.elements = list(iterable) def __getitem__(self, item): return self.elements[item] def __len__(self): return len(self.elements) def 
__setitem__(self, item, value): pass def __delitem__(self, item): pass def insert(self, item, index): pass expl = callequal(TestSequence([0, 1]), list([0, 2])) assert len(expl) > 1 def test_list_tuples(self): expl = callequal([], [(1,2)]) assert len(expl) > 1 expl = callequal([(1,2)], []) assert len(expl) > 1 def test_list_bad_repr(self): class A: def __repr__(self): raise ValueError(42) expl = callequal([], [A()]) assert 'ValueError' in "".join(expl) expl = callequal({}, {'1': A()}) assert 'faulty' in "".join(expl) def test_one_repr_empty(self): """ the faulty empty string repr did trigger a unbound local error in _diff_text """ class A(str): def __repr__(self): return '' expl = callequal(A(), '') assert not expl def test_repr_no_exc(self): expl = ' '.join(callequal('foo', 'bar')) assert 'raised in repr()' not in expl def test_unicode(self): left = py.builtin._totext('£€', 'utf-8') right = py.builtin._totext('£', 'utf-8') expl = callequal(left, right) assert expl[0] == py.builtin._totext("'£€' == '£'", 'utf-8') assert expl[1] == py.builtin._totext('- £€', 'utf-8') assert expl[2] == py.builtin._totext('+ £', 'utf-8') def test_python25_compile_issue257(testdir): testdir.makepyfile(""" def test_rewritten(): assert 1 == 2 # some comment """) result = testdir.runpytest() assert result.ret == 1 result.stdout.fnmatch_lines(""" *E*assert 1 == 2* *1 failed* """) @needsnewassert def test_rewritten(testdir): testdir.makepyfile(""" def test_rewritten(): assert "@py_builtins" in globals() """) assert testdir.runpytest().ret == 0 def test_reprcompare_notin(mock_config): detail = plugin.pytest_assertrepr_compare( mock_config, 'not in', 'foo', 'aaafoobbb')[1:] assert detail == ["'foo' is contained here:", ' aaafoobbb', '? 
+++'] @needsnewassert def test_pytest_assertrepr_compare_integration(testdir): testdir.makepyfile(""" def test_hello(): x = set(range(100)) y = x.copy() y.remove(50) assert x == y """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*def test_hello():*", "*assert x == y*", "*E*Extra items*left*", "*E*50*", ]) @needsnewassert def test_sequence_comparison_uses_repr(testdir): testdir.makepyfile(""" def test_hello(): x = set("hello x") y = set("hello y") assert x == y """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*def test_hello():*", "*assert x == y*", "*E*Extra items*left*", "*E*'x'*", "*E*Extra items*right*", "*E*'y'*", ]) @pytest.mark.xfail("sys.version_info < (2,6)") def test_assert_compare_truncate_longmessage(testdir): testdir.makepyfile(r""" def test_long(): a = list(range(200)) b = a[::2] a = '\n'.join(map(str, a)) b = '\n'.join(map(str, b)) assert a == b """) result = testdir.runpytest() result.stdout.fnmatch_lines([ "*truncated*use*-vv*", ]) result = testdir.runpytest('-vv') result.stdout.fnmatch_lines([ "*- 197", ]) @needsnewassert def test_assertrepr_loaded_per_dir(testdir): testdir.makepyfile(test_base=['def test_base(): assert 1 == 2']) a = testdir.mkdir('a') a_test = a.join('test_a.py') a_test.write('def test_a(): assert 1 == 2') a_conftest = a.join('conftest.py') a_conftest.write('def pytest_assertrepr_compare(): return ["summary a"]') b = testdir.mkdir('b') b_test = b.join('test_b.py') b_test.write('def test_b(): assert 1 == 2') b_conftest = b.join('conftest.py') b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]') result = testdir.runpytest() result.stdout.fnmatch_lines([ '*def test_base():*', '*E*assert 1 == 2*', '*def test_a():*', '*E*assert summary a*', '*def test_b():*', '*E*assert summary b*']) def test_assertion_options(testdir): testdir.makepyfile(""" def test_hello(): x = 3 assert x == 4 """) result = testdir.runpytest() assert "3 == 4" in result.stdout.str() off_options = (("--no-assert",), 
("--nomagic",), ("--no-assert", "--nomagic"), ("--assert=plain",), ("--assert=plain", "--no-assert"), ("--assert=plain", "--nomagic"), ("--assert=plain", "--no-assert", "--nomagic")) for opt in off_options: result = testdir.runpytest(*opt) assert "3 == 4" not in result.stdout.str() def test_old_assert_mode(testdir): testdir.makepyfile(""" def test_in_old_mode(): assert "@py_builtins" not in globals() """) result = testdir.runpytest("--assert=reinterp") assert result.ret == 0 def test_triple_quoted_string_issue113(testdir): testdir.makepyfile(""" def test_hello(): assert "" == ''' '''""") result = testdir.runpytest("--fulltrace") result.stdout.fnmatch_lines([ "*1 failed*", ]) assert 'SyntaxError' not in result.stdout.str() def test_traceback_failure(testdir): p1 = testdir.makepyfile(""" def g(): return 2 def f(x): assert x == g() def test_onefails(): f(3) """) result = testdir.runpytest(p1) result.stdout.fnmatch_lines([ "*test_traceback_failure.py F", "====* FAILURES *====", "____*____", "", " def test_onefails():", "> f(3)", "", "*test_*.py:6: ", "_ _ _ *", #"", " def f(x):", "> assert x == g()", "E assert 3 == 2", "E + where 2 = g()", "", "*test_traceback_failure.py:4: AssertionError" ]) @pytest.mark.skipif("sys.version_info < (2,5) or '__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" ) def test_warn_missing(testdir): testdir.makepyfile("") result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h") result.stderr.fnmatch_lines([ "*WARNING*assert statements are not executed*", ]) result = testdir.run(sys.executable, "-OO", "-m", "pytest", "--no-assert") result.stderr.fnmatch_lines([ "*WARNING*assert statements are not executed*", ]) def test_recursion_source_decode(testdir): testdir.makepyfile(""" def test_something(): pass """) testdir.makeini(""" [pytest] python_files = *.py """) result = testdir.runpytest("--collect-only") result.stdout.fnmatch_lines(""" """) def test_AssertionError_message(testdir): testdir.makepyfile(""" def 
test_hello(): x,y = 1,2 assert 0, (x,y) """) result = testdir.runpytest() result.stdout.fnmatch_lines(""" *def test_hello* *assert 0, (x,y)* *AssertionError: (1, 2)* """) pytest-2.5.1/testing/test_conftest.py0000664000175000017500000002202412254002202017337 0ustar hpkhpk00000000000000import py, pytest from _pytest.config import Conftest def pytest_generate_tests(metafunc): if "basedir" in metafunc.fixturenames: metafunc.addcall(param="global") metafunc.addcall(param="inpackage") def pytest_funcarg__basedir(request): def basedirmaker(request): d = request.getfuncargvalue("tmpdir") d.ensure("adir/conftest.py").write("a=1 ; Directory = 3") d.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5") if request.param == "inpackage": d.ensure("adir/__init__.py") d.ensure("adir/b/__init__.py") return d return request.cached_setup( lambda: basedirmaker(request), extrakey=request.param) def ConftestWithSetinitial(path): conftest = Conftest() conftest.setinitial([path]) return conftest class TestConftestValueAccessGlobal: def test_basic_init(self, basedir): conftest = Conftest() conftest.setinitial([basedir.join("adir")]) assert conftest.rget("a") == 1 def test_onimport(self, basedir): l = [] conftest = Conftest(onimport=l.append) conftest.setinitial([basedir.join("adir"), '--confcutdir=%s' % basedir]) assert len(l) == 1 assert conftest.rget("a") == 1 assert conftest.rget("b", basedir.join("adir", "b")) == 2 assert len(l) == 2 def test_immediate_initialiation_and_incremental_are_the_same(self, basedir): conftest = Conftest() len(conftest._path2confmods) conftest.getconftestmodules(basedir) snap1 = len(conftest._path2confmods) #assert len(conftest._path2confmods) == snap1 + 1 conftest.getconftestmodules(basedir.join('adir')) assert len(conftest._path2confmods) == snap1 + 1 conftest.getconftestmodules(basedir.join('b')) assert len(conftest._path2confmods) == snap1 + 2 def test_default_has_lower_prio(self, basedir): conftest = ConftestWithSetinitial(basedir.join("adir")) assert 
conftest.rget('Directory') == 3 #assert conftest.lget('Directory') == pytest.Directory def test_value_access_not_existing(self, basedir): conftest = ConftestWithSetinitial(basedir) pytest.raises(KeyError, lambda: conftest.rget('a')) #pytest.raises(KeyError, "conftest.lget('a')") def test_value_access_by_path(self, basedir): conftest = ConftestWithSetinitial(basedir) assert conftest.rget("a", basedir.join('adir')) == 1 #assert conftest.lget("a", basedir.join('adir')) == 1 assert conftest.rget("a", basedir.join('adir', 'b')) == 1.5 #assert conftest.lget("a", basedir.join('adir', 'b')) == 1 #assert conftest.lget("b", basedir.join('adir', 'b')) == 2 #assert pytest.raises(KeyError, # 'conftest.lget("b", basedir.join("a"))' #) def test_value_access_with_init_one_conftest(self, basedir): conftest = ConftestWithSetinitial(basedir.join('adir')) assert conftest.rget("a") == 1 #assert conftest.lget("a") == 1 def test_value_access_with_init_two_conftests(self, basedir): conftest = ConftestWithSetinitial(basedir.join("adir", "b")) conftest.rget("a") == 1.5 #conftest.lget("a") == 1 #conftest.lget("b") == 1 def test_value_access_with_confmod(self, basedir): startdir = basedir.join("adir", "b") startdir.ensure("xx", dir=True) conftest = ConftestWithSetinitial(startdir) mod, value = conftest.rget_with_confmod("a", startdir) assert value == 1.5 path = py.path.local(mod.__file__) assert path.dirpath() == basedir.join("adir", "b") assert path.purebasename.startswith("conftest") def test_conftest_in_nonpkg_with_init(tmpdir): tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3") tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5") tmpdir.ensure("adir-1.0/b/__init__.py") tmpdir.ensure("adir-1.0/__init__.py") ConftestWithSetinitial(tmpdir.join("adir-1.0", "b")) def test_doubledash_not_considered(testdir): conf = testdir.mkdir("--option") conf.join("conftest.py").ensure() conftest = Conftest() conftest.setinitial([conf.basename, conf.basename]) l = 
conftest.getconftestmodules(None) assert len(l) == 0 def test_issue151_load_all_conftests(testdir): names = "code proj src".split() for name in names: p = testdir.mkdir(name) p.ensure("conftest.py") conftest = Conftest() conftest.setinitial(names) d = list(conftest._conftestpath2mod.values()) assert len(d) == len(names) def test_conftest_global_import(testdir): testdir.makeconftest("x=3") p = testdir.makepyfile(""" import py, pytest from _pytest.config import Conftest conf = Conftest() mod = conf.importconftest(py.path.local("conftest.py")) assert mod.x == 3 import conftest assert conftest is mod, (conftest, mod) subconf = py.path.local().ensure("sub", "conftest.py") subconf.write("y=4") mod2 = conf.importconftest(subconf) assert mod != mod2 assert mod2.y == 4 import conftest assert conftest is mod2, (conftest, mod) """) res = testdir.runpython(p) assert res.ret == 0 def test_conftestcutdir(testdir): conf = testdir.makeconftest("") p = testdir.mkdir("x") conftest = Conftest(confcutdir=p) conftest.setinitial([testdir.tmpdir]) l = conftest.getconftestmodules(p) assert len(l) == 0 l = conftest.getconftestmodules(conf.dirpath()) assert len(l) == 0 assert conf not in conftest._conftestpath2mod # but we can still import a conftest directly conftest.importconftest(conf) l = conftest.getconftestmodules(conf.dirpath()) assert l[0].__file__.startswith(str(conf)) # and all sub paths get updated properly l = conftest.getconftestmodules(p) assert len(l) == 1 assert l[0].__file__.startswith(str(conf)) def test_conftestcutdir_inplace_considered(testdir): conf = testdir.makeconftest("") conftest = Conftest(confcutdir=conf.dirpath()) conftest.setinitial([conf.dirpath()]) l = conftest.getconftestmodules(conf.dirpath()) assert len(l) == 1 assert l[0].__file__.startswith(str(conf)) def test_setinitial_confcut(testdir): conf = testdir.makeconftest("") sub = testdir.mkdir("sub") sub.chdir() for opts in (["--confcutdir=%s" % sub, sub], [sub, "--confcutdir=%s" % sub], ["--confcutdir=.", 
sub], [sub, "--confcutdir", sub], [str(sub), "--confcutdir", "."], ): conftest = Conftest() conftest.setinitial(opts) assert conftest._confcutdir == sub assert conftest.getconftestmodules(sub) == [] assert conftest.getconftestmodules(conf.dirpath()) == [] @pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split()) def test_setinitial_conftest_subdirs(testdir, name): sub = testdir.mkdir(name) subconftest = sub.ensure("conftest.py") conftest = Conftest() conftest.setinitial([sub.dirpath(), '--confcutdir=%s' % testdir.tmpdir]) if name not in ('whatever', '.dotdir'): assert subconftest in conftest._conftestpath2mod assert len(conftest._conftestpath2mod) == 1 else: assert subconftest not in conftest._conftestpath2mod assert len(conftest._conftestpath2mod) == 0 def test_conftest_confcutdir(testdir): testdir.makeconftest("assert 0") x = testdir.mkdir("x") x.join("conftest.py").write(py.code.Source(""" def pytest_addoption(parser): parser.addoption("--xyz", action="store_true") """)) result = testdir.runpytest("-h", "--confcutdir=%s" % x, x) result.stdout.fnmatch_lines(["*--xyz*"]) def test_conftest_import_order(testdir, monkeypatch): ct1 = testdir.makeconftest("") sub = testdir.mkdir("sub") ct2 = sub.join("conftest.py") ct2.write("") def impct(p): return p conftest = Conftest() monkeypatch.setattr(conftest, 'importconftest', impct) assert conftest.getconftestmodules(sub) == [ct1, ct2] def test_fixture_dependency(testdir, monkeypatch): ct1 = testdir.makeconftest("") ct1 = testdir.makepyfile("__init__.py") ct1.write("") sub = testdir.mkdir("sub") sub.join("__init__.py").write("") sub.join("conftest.py").write(py.std.textwrap.dedent(""" import pytest @pytest.fixture def not_needed(): assert False, "Should not be called!" @pytest.fixture def foo(): assert False, "Should not be called!" 
@pytest.fixture def bar(foo): return 'bar' """)) subsub = sub.mkdir("subsub") subsub.join("__init__.py").write("") subsub.join("test_bar.py").write(py.std.textwrap.dedent(""" import pytest @pytest.fixture def bar(): return 'sub bar' def test_event_fixture(bar): assert bar == 'sub bar' """)) result = testdir.runpytest("sub") result.stdout.fnmatch_lines(["*1 passed*"]) pytest-2.5.1/pytest.egg-info/0000775000175000017500000000000012254002202015446 5ustar hpkhpk00000000000000pytest-2.5.1/pytest.egg-info/entry_points.txt0000664000175000017500000000010312254002202020736 0ustar hpkhpk00000000000000[console_scripts] py.test-2.7 = pytest:main py.test = pytest:main pytest-2.5.1/pytest.egg-info/top_level.txt0000664000175000017500000000001712254002202020176 0ustar hpkhpk00000000000000_pytest pytest pytest-2.5.1/pytest.egg-info/requires.txt0000664000175000017500000000001212254002202020037 0ustar hpkhpk00000000000000py>=1.4.19pytest-2.5.1/pytest.egg-info/PKG-INFO0000664000175000017500000000647412254002202016556 0ustar hpkhpk00000000000000Metadata-Version: 1.1 Name: pytest Version: 2.5.1 Summary: py.test: simple powerful testing with Python Home-page: http://pytest.org Author: Holger Krekel, Benjamin Peterson, Ronny Pfannschmidt, Floris Bruynooghe and others Author-email: holger at merlinux.eu License: MIT license Description: Documentation: http://pytest.org/latest/ Changelog: http://pytest.org/latest/changelog.html Issues: https://bitbucket.org/hpk42/pytest/issues?status=open The ``py.test`` testing tool makes it easy to write small tests, yet scales to support complex functional testing. It provides - `auto-discovery `_ of test modules and functions, - detailed info on failing `assert statements `_ (no need to remember ``self.assert*`` names) - `modular fixtures `_ for managing small or parametrized long-lived test resources. 
- multi-paradigm support: you can use ``py.test`` to run test suites based on `unittest `_ (or trial), `nose `_ - single-source compatibility to Python2.4 all the way up to Python3.3, PyPy-1.9 and Jython-2.5.1. - many `external plugins `_. .. image:: https://secure.travis-ci.org/hpk42/pytest.png :target: http://travis-ci.org/hpk42/pytest A simple example for a test:: # content of test_module.py def test_function(): i = 4 assert i == 3 which can be run with ``py.test test_module.py``. See `getting-started `_ for more examples. For much more info, including PDF docs, see http://pytest.org and report bugs at: http://bitbucket.org/hpk42/pytest/issues/ and checkout repos at: http://github.com/hpk42/pytest/ (mirror) http://bitbucket.org/hpk42/pytest/ Copyright Holger Krekel and others, 2004-2013 Licensed under the MIT license. Platform: unix Platform: linux Platform: osx Platform: cygwin Platform: win32 Classifier: Development Status :: 6 - Mature Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: POSIX Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: MacOS :: MacOS X Classifier: Topic :: Software Development :: Testing Classifier: Topic :: Software Development :: Libraries Classifier: Topic :: Utilities Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3.0 Classifier: Programming Language :: Python :: 3.1 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 pytest-2.5.1/pytest.egg-info/SOURCES.txt0000664000175000017500000002065612254002202017343 0ustar hpkhpk00000000000000CHANGELOG LICENSE MANIFEST.in README.rst pytest.py setup.cfg setup.py tox.ini _pytest/__init__.py _pytest/_argcomplete.py _pytest/capture.py 
_pytest/config.py _pytest/core.py _pytest/doctest.py _pytest/genscript.py _pytest/helpconfig.py _pytest/hookspec.py _pytest/junitxml.py _pytest/main.py _pytest/mark.py _pytest/monkeypatch.py _pytest/nose.py _pytest/pastebin.py _pytest/pdb.py _pytest/pytester.py _pytest/python.py _pytest/recwarn.py _pytest/resultlog.py _pytest/runner.py _pytest/skipping.py _pytest/standalonetemplate.py _pytest/terminal.py _pytest/tmpdir.py _pytest/unittest.py _pytest/assertion/__init__.py _pytest/assertion/newinterpret.py _pytest/assertion/oldinterpret.py _pytest/assertion/reinterpret.py _pytest/assertion/rewrite.py _pytest/assertion/util.py doc/en/Makefile doc/en/apiref.txt doc/en/assert.txt doc/en/attic_fixtures.txt doc/en/bash-completion.txt doc/en/builtin.txt doc/en/capture.txt doc/en/changelog.txt doc/en/check_sphinx.py doc/en/conf.py doc/en/conftest.py doc/en/contact.txt doc/en/contents.txt doc/en/customize.txt doc/en/develop.txt doc/en/doctest.txt doc/en/faq.txt doc/en/feedback.rst doc/en/fixture.txt doc/en/funcarg_compare.txt doc/en/funcargs.txt doc/en/genapi.py doc/en/getting-started.txt doc/en/goodpractises.txt doc/en/index.txt doc/en/links.inc doc/en/mark.txt doc/en/monkeypatch.txt doc/en/naming20.txt doc/en/nose.txt doc/en/overview.txt doc/en/parametrize.txt doc/en/plugins.txt doc/en/projects.txt doc/en/pytest.ini doc/en/recwarn.txt doc/en/setup.txt doc/en/skipping.txt doc/en/talks.txt doc/en/tmpdir.txt doc/en/unittest.txt doc/en/usage.txt doc/en/xdist.txt doc/en/xunit_setup.txt doc/en/yieldfixture.txt doc/en/_templates/globaltoc.html doc/en/_templates/layout.html doc/en/_templates/links.html doc/en/_templates/sidebarintro.html doc/en/_themes/.gitignore doc/en/_themes/LICENSE doc/en/_themes/README doc/en/_themes/flask_theme_support.py doc/en/_themes/flask/layout.html doc/en/_themes/flask/relations.html doc/en/_themes/flask/theme.conf doc/en/_themes/flask/static/flasky.css_t doc/en/announce/index.txt doc/en/announce/release-2.0.0.txt doc/en/announce/release-2.0.1.txt 
doc/en/announce/release-2.0.2.txt doc/en/announce/release-2.0.3.txt doc/en/announce/release-2.1.0.txt doc/en/announce/release-2.1.1.txt doc/en/announce/release-2.1.2.txt doc/en/announce/release-2.1.3.txt doc/en/announce/release-2.2.0.txt doc/en/announce/release-2.2.1.txt doc/en/announce/release-2.2.2.txt doc/en/announce/release-2.2.4.txt doc/en/announce/release-2.3.0.txt doc/en/announce/release-2.3.1.txt doc/en/announce/release-2.3.2.txt doc/en/announce/release-2.3.3.txt doc/en/announce/release-2.3.4.txt doc/en/announce/release-2.3.5.txt doc/en/announce/release-2.4.0.txt doc/en/announce/release-2.4.1.txt doc/en/announce/release-2.4.2.txt doc/en/announce/release-2.5.0.txt doc/en/announce/release-2.5.1.txt doc/en/example/attic.txt doc/en/example/conftest.py doc/en/example/index.txt doc/en/example/markers.txt doc/en/example/multipython.py doc/en/example/nonpython.txt doc/en/example/parametrize.txt doc/en/example/pythoncollection.py doc/en/example/pythoncollection.txt doc/en/example/reportingdemo.txt doc/en/example/simple.txt doc/en/example/special.txt doc/en/example/xfail_demo.py doc/en/example/assertion/failure_demo.py doc/en/example/assertion/test_failures.py doc/en/example/assertion/test_setup_flow_example.py doc/en/example/assertion/global_testmodule_config/conftest.py doc/en/example/assertion/global_testmodule_config/test_hello.py doc/en/example/costlysetup/conftest.py doc/en/example/costlysetup/sub1/__init__.py doc/en/example/costlysetup/sub1/test_quick.py doc/en/example/costlysetup/sub2/__init__.py doc/en/example/costlysetup/sub2/test_two.py doc/en/example/layout1/setup.cfg doc/en/example/nonpython/__init__.py doc/en/example/nonpython/conftest.py doc/en/example/nonpython/test_simple.yml doc/en/example/py2py3/conftest.py doc/en/example/py2py3/test_py2.py doc/en/example/py2py3/test_py3.py doc/en/img/cramer2.png doc/en/img/gaynor3.png doc/en/img/keleshev.png doc/en/img/pylib.png doc/en/img/theuni.png doc/en/plugins_index/plugins_index.py 
doc/en/plugins_index/plugins_index.txt doc/en/plugins_index/test_plugins_index.expected.txt doc/en/plugins_index/test_plugins_index.py doc/en/test/attic.txt doc/en/test/config.html doc/en/test/dist.html doc/en/test/extend.html doc/en/test/index.txt doc/en/test/mission.txt doc/en/test/test.html doc/en/test/plugin/cov.txt doc/en/test/plugin/coverage.txt doc/en/test/plugin/django.txt doc/en/test/plugin/figleaf.txt doc/en/test/plugin/genscript.txt doc/en/test/plugin/helpconfig.txt doc/en/test/plugin/index.txt doc/en/test/plugin/links.txt doc/en/test/plugin/nose.txt doc/en/test/plugin/oejskit.txt doc/en/test/plugin/terminal.txt doc/en/test/plugin/xdist.txt doc/ja/Makefile doc/ja/apiref.txt doc/ja/assert.txt doc/ja/builtin.txt doc/ja/capture.txt doc/ja/changelog.txt doc/ja/check_sphinx.py doc/ja/conf.py doc/ja/conftest.py doc/ja/contact.txt doc/ja/contents.txt doc/ja/customize.txt doc/ja/develop.txt doc/ja/doctest.txt doc/ja/faq.txt doc/ja/feedback.rst doc/ja/funcargs.txt doc/ja/getting-started.txt doc/ja/goodpractises.txt doc/ja/index.txt doc/ja/links.inc doc/ja/mark.txt doc/ja/monkeypatch.txt doc/ja/naming20.txt doc/ja/nose.txt doc/ja/overview.txt doc/ja/plugins.txt doc/ja/projects.txt doc/ja/pytest.ini doc/ja/recwarn.txt doc/ja/skipping.txt doc/ja/talks.txt doc/ja/tmpdir.txt doc/ja/unittest.txt doc/ja/usage.txt doc/ja/xdist.txt doc/ja/xunit_setup.txt doc/ja/_static/sphinxdoc.css doc/ja/announce/index.txt doc/ja/announce/release-2.0.0.txt doc/ja/announce/release-2.0.1.txt doc/ja/announce/release-2.0.2.txt doc/ja/announce/release-2.0.3.txt doc/ja/announce/release-2.1.0.txt doc/ja/announce/release-2.1.1.txt doc/ja/announce/release-2.1.2.txt doc/ja/announce/release-2.1.3.txt doc/ja/announce/release-2.2.0.txt doc/ja/announce/release-2.2.1.txt doc/ja/announce/release-2.2.2.txt doc/ja/announce/release-2.2.4.txt doc/ja/example/attic.txt doc/ja/example/conftest.py doc/ja/example/index.txt doc/ja/example/markers.txt doc/ja/example/multipython.py doc/ja/example/mysetup.txt 
doc/ja/example/nonpython.txt doc/ja/example/parametrize.txt doc/ja/example/pythoncollection.py doc/ja/example/pythoncollection.txt doc/ja/example/reportingdemo.txt doc/ja/example/simple.txt doc/ja/example/xfail_demo.py doc/ja/example/assertion/failure_demo.py doc/ja/example/assertion/test_failures.py doc/ja/example/assertion/test_setup_flow_example.py doc/ja/example/assertion/global_testmodule_config/conftest.py doc/ja/example/assertion/global_testmodule_config/test_hello.py doc/ja/example/costlysetup/conftest.py doc/ja/example/costlysetup/sub1/__init__.py doc/ja/example/costlysetup/sub1/test_quick.py doc/ja/example/costlysetup/sub2/__init__.py doc/ja/example/costlysetup/sub2/test_two.py doc/ja/example/layout1/setup.cfg doc/ja/example/nonpython/__init__.py doc/ja/example/nonpython/conftest.py doc/ja/example/nonpython/test_simple.yml doc/ja/example/py2py3/conftest.py doc/ja/example/py2py3/test_py2.py doc/ja/example/py2py3/test_py3.py doc/ja/img/pylib.png doc/ja/test/attic.txt doc/ja/test/index.txt doc/ja/test/mission.txt doc/ja/test/plugin/cov.txt doc/ja/test/plugin/coverage.txt doc/ja/test/plugin/django.txt doc/ja/test/plugin/figleaf.txt doc/ja/test/plugin/genscript.txt doc/ja/test/plugin/helpconfig.txt doc/ja/test/plugin/index.txt doc/ja/test/plugin/links.txt doc/ja/test/plugin/nose.txt doc/ja/test/plugin/oejskit.txt doc/ja/test/plugin/terminal.txt doc/ja/test/plugin/xdist.txt pytest.egg-info/PKG-INFO pytest.egg-info/SOURCES.txt pytest.egg-info/dependency_links.txt pytest.egg-info/entry_points.txt pytest.egg-info/not-zip-safe pytest.egg-info/requires.txt pytest.egg-info/top_level.txt testing/acceptance_test.py testing/conftest.py testing/test_argcomplete.py testing/test_assertinterpret.py testing/test_assertion.py testing/test_assertrewrite.py testing/test_capture.py testing/test_collection.py testing/test_config.py testing/test_conftest.py testing/test_core.py testing/test_doctest.py testing/test_genscript.py testing/test_helpconfig.py testing/test_junitxml.py 
testing/test_mark.py testing/test_monkeypatch.py testing/test_nose.py testing/test_parseopt.py testing/test_pastebin.py testing/test_pdb.py testing/test_pytester.py testing/test_recwarn.py testing/test_resultlog.py testing/test_runner.py testing/test_runner_xunit.py testing/test_session.py testing/test_skipping.py testing/test_terminal.py testing/test_tmpdir.py testing/test_unittest.py testing/python/collect.py testing/python/fixture.py testing/python/integration.py testing/python/metafunc.py testing/python/raises.pypytest-2.5.1/pytest.egg-info/dependency_links.txt0000664000175000017500000000000112254002202021514 0ustar hpkhpk00000000000000 pytest-2.5.1/pytest.egg-info/not-zip-safe0000664000175000017500000000000112254002202017674 0ustar hpkhpk00000000000000 pytest-2.5.1/PKG-INFO0000664000175000017500000000647412254002202013534 0ustar hpkhpk00000000000000Metadata-Version: 1.1 Name: pytest Version: 2.5.1 Summary: py.test: simple powerful testing with Python Home-page: http://pytest.org Author: Holger Krekel, Benjamin Peterson, Ronny Pfannschmidt, Floris Bruynooghe and others Author-email: holger at merlinux.eu License: MIT license Description: Documentation: http://pytest.org/latest/ Changelog: http://pytest.org/latest/changelog.html Issues: https://bitbucket.org/hpk42/pytest/issues?status=open The ``py.test`` testing tool makes it easy to write small tests, yet scales to support complex functional testing. It provides - `auto-discovery `_ of test modules and functions, - detailed info on failing `assert statements `_ (no need to remember ``self.assert*`` names) - `modular fixtures `_ for managing small or parametrized long-lived test resources. - multi-paradigm support: you can use ``py.test`` to run test suites based on `unittest `_ (or trial), `nose `_ - single-source compatibility to Python2.4 all the way up to Python3.3, PyPy-1.9 and Jython-2.5.1. - many `external plugins `_. .. 
image:: https://secure.travis-ci.org/hpk42/pytest.png :target: http://travis-ci.org/hpk42/pytest A simple example for a test:: # content of test_module.py def test_function(): i = 4 assert i == 3 which can be run with ``py.test test_module.py``. See `getting-started `_ for more examples. For much more info, including PDF docs, see http://pytest.org and report bugs at: http://bitbucket.org/hpk42/pytest/issues/ and checkout repos at: http://github.com/hpk42/pytest/ (mirror) http://bitbucket.org/hpk42/pytest/ Copyright Holger Krekel and others, 2004-2013 Licensed under the MIT license. Platform: unix Platform: linux Platform: osx Platform: cygwin Platform: win32 Classifier: Development Status :: 6 - Mature Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: POSIX Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: MacOS :: MacOS X Classifier: Topic :: Software Development :: Testing Classifier: Topic :: Software Development :: Libraries Classifier: Topic :: Utilities Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3.0 Classifier: Programming Language :: Python :: 3.1 Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 pytest-2.5.1/CHANGELOG0000664000175000017500000020344712254002202013650 0ustar hpkhpk000000000000002.5.1 ----------------------------------- - merge new documentation styling PR from Tobias Bieniek. - fix issue403: allow parametrize of multiple same-name functions within a collection node. Thanks Andreas Kloeckner and Alex Gaynor for reporting and analysis. - Allow parameterized fixtures to specify the ID of the parameters by adding an ids argument to pytest.fixture() and pytest.yield_fixture(). 
Thanks Floris Bruynooghe. - fix issue404 by always using the binary xml escape in the junitxml plugin. Thanks Ronny Pfannschmidt. - fix issue407: fix addoption docstring to point to argparse instead of optparse. Thanks Daniel D. Wright. 2.5.0 ----------------------------------- - dropped python2.5 from automated release testing of pytest itself which means it's probably going to break soon (but still works with this release we believe). - simplified and fixed implementation for calling finalizers when parametrized fixtures or function arguments are involved. finalization is now performed lazily at setup time instead of in the "teardown phase". While this might sound odd at first, it helps to ensure that we are correctly handling setup/teardown even in complex code. User-level code should not be affected unless it's implementing the pytest_runtest_teardown hook and expecting certain fixture instances are torn down within (very unlikely and would have been unreliable anyway). - PR90: add --color=yes|no|auto option to force terminal coloring mode ("auto" is default). Thanks Marc Abramowitz. - fix issue319 - correctly show unicode in assertion errors. Many thanks to Floris Bruynooghe for the complete PR. Also means we depend on py>=1.4.19 now. - fix issue396 - correctly sort and finalize class-scoped parametrized tests independently from number of methods on the class. - refix issue323 in a better way -- parametrization should now never cause Runtime Recursion errors because the underlying algorithm for re-ordering tests per-scope/per-fixture is not recursive anymore (it was tail-call recursive before which could lead to problems for more than >966 non-function scoped parameters). - fix issue290 - there is preliminary support now for parametrizing with repeated same values (sometimes useful to to test if calling a second time works as with the first time). 
- close issue240 - document precisely how pytest module importing works, discuss the two common test directory layouts, and how it interacts with PEP420-namespace packages. - fix issue246 fix finalizer order to be LIFO on independent fixtures depending on a parametrized higher-than-function scoped fixture. (was quite some effort so please bear with the complexity of this sentence :) Thanks Ralph Schmitt for the precise failure example. - fix issue244 by implementing special index for parameters to only use indices for paramentrized test ids - fix issue287 by running all finalizers but saving the exception from the first failing finalizer and re-raising it so teardown will still have failed. We reraise the first failing exception because it might be the cause for other finalizers to fail. - fix ordering when mock.patch or other standard decorator-wrappings are used with test methods. This fixues issue346 and should help with random "xdist" collection failures. Thanks to Ronny Pfannschmidt and Donald Stufft for helping to isolate it. - fix issue357 - special case "-k" expressions to allow for filtering with simple strings that are not valid python expressions. Examples: "-k 1.3" matches all tests parametrized with 1.3. "-k None" filters all tests that have "None" in their name and conversely "-k 'not None'". Previously these examples would raise syntax errors. - fix issue384 by removing the trial support code since the unittest compat enhancements allow trial to handle it on its own - don't hide an ImportError when importing a plugin produces one. fixes issue375. - fix issue275 - allow usefixtures and autouse fixtures for running doctest text files. - fix issue380 by making --resultlog only rely on longrepr instead of the "reprcrash" attribute which only exists sometimes. - address issue122: allow @pytest.fixture(params=iterator) by exploding into a list early on. - fix pexpect-3.0 compatibility for pytest's own tests. 
(fixes issue386) - allow nested parametrize-value markers, thanks James Lan for the PR. - fix unicode handling with new monkeypatch.setattr(import_path, value) API. Thanks Rob Dennis. Fixes issue371. - fix unicode handling with junitxml, fixes issue368. - In assertion rewriting mode on Python 2, fix the detection of coding cookies. See issue #330. - make "--runxfail" turn imperative pytest.xfail calls into no ops (it already did neutralize pytest.mark.xfail markers) - refine pytest / pkg_resources interactions: The AssertionRewritingHook PEP302 compliant loader now registers itself with setuptools/pkg_resources properly so that the pkg_resources.resource_stream method works properly. Fixes issue366. Thanks for the investigations and full PR to Jason R. Coombs. - pytestconfig fixture is now session-scoped as it is the same object during the whole test run. Fixes issue370. - avoid one surprising case of marker malfunction/confusion:: @pytest.mark.some(lambda arg: ...) def test_function(): would not work correctly because pytest assumes @pytest.mark.some gets a function to be decorated already. We now at least detect if this arg is an lambda and thus the example will work. Thanks Alex Gaynor for bringing it up. - xfail a test on pypy that checks wrong encoding/ascii (pypy does not error out). fixes issue385. - internally make varnames() deal with classes's __init__, although it's not needed by pytest itself atm. Also fix caching. Fixes issue376. - fix issue221 - handle importing of namespace-package with no __init__.py properly. - refactor internal FixtureRequest handling to avoid monkeypatching. One of the positive user-facing effects is that the "request" object can now be used in closures. - fixed version comparison in pytest.importskip(modname, minverstring) - fix issue377 by clarifying in the nose-compat docs that pytest does not duplicate the unittest-API into the "plain" namespace. 
- fix verbose reporting for @mock'd test functions v2.4.2 ----------------------------------- - on Windows require colorama and a newer py lib so that py.io.TerminalWriter() now uses colorama instead of its own ctypes hacks. (fixes issue365) thanks Paul Moore for bringing it up. - fix "-k" matching of tests where "repr" and "attr" and other names would cause wrong matches because of an internal implementation quirk (don't ask) which is now properly implemented. fixes issue345. - avoid tmpdir fixture to create too long filenames especially when parametrization is used (issue354) - fix pytest-pep8 and pytest-flakes / pytest interactions (collection names in mark plugin was assuming an item always has a function which is not true for those plugins etc.) Thanks Andi Zeidler. - introduce node.get_marker/node.add_marker API for plugins like pytest-pep8 and pytest-flakes to avoid the messy details of the node.keywords pseudo-dicts. Adapated docs. - remove attempt to "dup" stdout at startup as it's icky. the normal capturing should catch enough possibilities of tests messing up standard FDs. - add pluginmanager.do_configure(config) as a link to config.do_configure() for plugin-compatibility v2.4.1 ----------------------------------- - When using parser.addoption() unicode arguments to the "type" keyword should also be converted to the respective types. thanks Floris Bruynooghe, @dnozay. (fixes issue360 and issue362) - fix dotted filename completion when using argcomplete thanks Anthon van der Neuth. (fixes issue361) - fix regression when a 1-tuple ("arg",) is used for specifying parametrization (the values of the parametrization were passed nested in a tuple). Thanks Donald Stufft. - merge doc typo fixes, thanks Andy Dirnberger v2.4 ----------------------------------- known incompatibilities: - if calling --genscript from python2.7 or above, you only get a standalone script which works on python2.7 or above. Use Python2.6 to also get a python2.5 compatible version. 
- all xunit-style teardown methods (nose-style, pytest-style, unittest-style) will not be called if the corresponding setup method failed, see issue322 below. - the pytest_plugin_unregister hook wasn't ever properly called and there is no known implementation of the hook - so it got removed. - pytest.fixture-decorated functions cannot be generators (i.e. use yield) anymore. This change might be reversed in 2.4.1 if it causes unforeseen real-life issues. However, you can always write and return an inner function/generator and change the fixture consumer to iterate over the returned generator. This change was done in lieu of the new ``pytest.yield_fixture`` decorator, see below. new features: - experimentally introduce a new ``pytest.yield_fixture`` decorator which accepts exactly the same parameters as pytest.fixture but mandates a ``yield`` statement instead of a ``return statement`` from fixture functions. This allows direct integration with "with-style" context managers in fixture functions and generally avoids registering of finalization callbacks in favour of treating the "after-yield" as teardown code. Thanks Andreas Pelme, Vladimir Keleshev, Floris Bruynooghe, Ronny Pfannschmidt and many others for discussions. - allow boolean expression directly with skipif/xfail if a "reason" is also specified. Rework skipping documentation to recommend "condition as booleans" because it prevents surprises when importing markers between modules. Specifying conditions as strings will remain fully supported. - reporting: color the last line red or green depending if failures/errors occured or everything passed. thanks Christian Theunert. - make "import pdb ; pdb.set_trace()" work natively wrt capturing (no "-s" needed anymore), making ``pytest.set_trace()`` a mere shortcut. - fix issue181: --pdb now also works on collect errors (and on internal errors) . 
This was implemented by a slight internal refactoring and the introduction of a new hook ``pytest_exception_interact`` hook (see next item). - fix issue341: introduce new experimental hook for IDEs/terminals to intercept debugging: ``pytest_exception_interact(node, call, report)``. - new monkeypatch.setattr() variant to provide a shorter invocation for patching out classes/functions from modules: monkeypatch.setattr("requests.get", myfunc) will replace the "get" function of the "requests" module with ``myfunc``. - fix issue322: tearDownClass is not run if setUpClass failed. Thanks Mathieu Agopian for the initial fix. Also make all of pytest/nose finalizer mimick the same generic behaviour: if a setupX exists and fails, don't run teardownX. This internally introduces a new method "node.addfinalizer()" helper which can only be called during the setup phase of a node. - simplify pytest.mark.parametrize() signature: allow to pass a CSV-separated string to specify argnames. For example: ``pytest.mark.parametrize("input,expected", [(1,2), (2,3)])`` works as well as the previous: ``pytest.mark.parametrize(("input", "expected"), ...)``. - add support for setUpModule/tearDownModule detection, thanks Brian Okken. - integrate tab-completion on options through use of "argcomplete". Thanks Anthon van der Neut for the PR. - change option names to be hyphen-separated long options but keep the old spelling backward compatible. py.test -h will only show the hyphenated version, for example "--collect-only" but "--collectonly" will remain valid as well (for backward-compat reasons). Many thanks to Anthon van der Neut for the implementation and to Hynek Schlawack for pushing us. - fix issue 308 - allow to mark/xfail/skip individual parameter sets when parametrizing. Thanks Brianna Laugher. - call new experimental pytest_load_initial_conftests hook to allow 3rd party plugins to do something before a conftest is loaded. 
Bug fixes: - fix issue358 - capturing options are now parsed more properly by using a new parser.parse_known_args method. - pytest now uses argparse instead of optparse (thanks Anthon) which means that "argparse" is added as a dependency if installing into python2.6 environments or below. - fix issue333: fix a case of bad unittest/pytest hook interaction. - PR27: correctly handle nose.SkipTest during collection. Thanks Antonio Cuni, Ronny Pfannschmidt. - fix issue355: junitxml puts name="pytest" attribute to testsuite tag. - fix issue336: autouse fixture in plugins should work again. - fix issue279: improve object comparisons on assertion failure for standard datatypes and recognise collections.abc. Thanks to Brianna Laugher and Mathieu Agopian. - fix issue317: assertion rewriter support for the is_package method - fix issue335: document py.code.ExceptionInfo() object returned from pytest.raises(), thanks Mathieu Agopian. - remove implicit distribute_setup support from setup.py. - fix issue305: ignore any problems when writing pyc files. - SO-17664702: call fixture finalizers even if the fixture function partially failed (finalizers would not always be called before) - fix issue320 - fix class scope for fixtures when mixed with module-level functions. Thanks Anatloy Bubenkoff. - you can specify "-q" or "-qq" to get different levels of "quieter" reporting (thanks Katarzyna Jachim) - fix issue300 - Fix order of conftest loading when starting py.test in a subdirectory. - fix issue323 - sorting of many module-scoped arg parametrizations - make sessionfinish hooks execute with the same cwd-context as at session start (helps fix plugin behaviour which write output files with relative path such as pytest-cov) - fix issue316 - properly reference collection hooks in docs - fix issue 306 - cleanup of -k/-m options to only match markers/test names/keywords respectively. Thanks Wouter van Ackooy. 
- improved doctest counting for doctests in python modules -- files without any doctest items will not show up anymore and doctest examples are counted as separate test items. thanks Danilo Bellini. - fix issue245 by depending on the released py-1.4.14 which fixes py.io.dupfile to work with files with no mode. Thanks Jason R. Coombs. - fix junitxml generation when test output contains control characters, addressing issue267, thanks Jaap Broekhuizen - fix issue338: honor --tb style for setup/teardown errors as well. Thanks Maho. - fix issue307 - use yaml.safe_load in example, thanks Mark Eichin. - better parametrize error messages, thanks Brianna Laugher - pytest_terminal_summary(terminalreporter) hooks can now use ".section(title)" and ".line(msg)" methods to print extra information at the end of a test run. v2.3.5 ----------------------------------- - fix issue169: respect --tb=style with setup/teardown errors as well. - never consider a fixture function for test function collection - allow re-running of test items / helps to fix pytest-reruntests plugin and also help to keep less fixture/resource references alive - put captured stdout/stderr into junitxml output even for passing tests (thanks Adam Goucher) - Issue 265 - integrate nose setup/teardown with setupstate so it doesnt try to teardown if it did not setup - issue 271 - dont write junitxml on slave nodes - Issue 274 - dont try to show full doctest example when doctest does not know the example location - issue 280 - disable assertion rewriting on buggy CPython 2.6.0 - inject "getfixture()" helper to retrieve fixtures from doctests, thanks Andreas Zeidler - issue 259 - when assertion rewriting, be consistent with the default source encoding of ASCII on Python 2 - issue 251 - report a skip instead of ignoring classes with init - issue250 unicode/str mixes in parametrization names and values now works - issue257, assertion-triggered compilation of source ending in a comment line doesn't blow up in python2.5 
(fixed through py>=1.4.13.dev6) - fix --genscript option to generate standalone scripts that also work with python3.3 (importer ordering) - issue171 - in assertion rewriting, show the repr of some global variables - fix option help for "-k" - move long description of distribution into README.rst - improve docstring for metafunc.parametrize() - fix bug where using capsys with pytest.set_trace() in a test function would break when looking at capsys.readouterr() - allow to specify prefixes starting with "_" when customizing python_functions test discovery. (thanks Graham Horler) - improve PYTEST_DEBUG tracing output by puting extra data on a new lines with additional indent - ensure OutcomeExceptions like skip/fail have initialized exception attributes - issue 260 - don't use nose special setup on plain unittest cases - fix issue134 - print the collect errors that prevent running specified test items - fix issue266 - accept unicode in MarkEvaluator expressions v2.3.4 ----------------------------------- - yielded test functions will now have autouse-fixtures active but cannot accept fixtures as funcargs - it's anyway recommended to rather use the post-2.0 parametrize features instead of yield, see: http://pytest.org/latest/example/parametrize.html - fix autouse-issue where autouse-fixtures would not be discovered if defined in a a/conftest.py file and tests in a/tests/test_some.py - fix issue226 - LIFO ordering for fixture teardowns - fix issue224 - invocations with >256 char arguments now work - fix issue91 - add/discuss package/directory level setups in example - allow to dynamically define markers via item.keywords[...]=assignment integrating with "-m" option - make "-k" accept an expressions the same as with "-m" so that one can write: -k "name1 or name2" etc. This is a slight incompatibility if you used special syntax like "TestClass.test_method" which you now need to write as -k "TestClass and test_method" to match a certain method in a certain test class. 
v2.3.3 ----------------------------------- - fix issue214 - parse modules that contain special objects like e. g. flask's request object which blows up on getattr access if no request is active. thanks Thomas Waldmann. - fix issue213 - allow to parametrize with values like numpy arrays that do not support an __eq__ operator - fix issue215 - split test_python.org into multiple files - fix issue148 - @unittest.skip on classes is now recognized and avoids calling setUpClass/tearDownClass, thanks Pavel Repin - fix issue209 - reintroduce python2.4 support by depending on newer pylib which re-introduced statement-finding for pre-AST interpreters - nose support: only call setup if its a callable, thanks Andrew Taumoefolau - fix issue219 - add py2.4-3.3 classifiers to TROVE list - in tracebacks *,** arg values are now shown next to normal arguments (thanks Manuel Jacob) - fix issue217 - support mock.patch with pytest's fixtures - note that you need either mock-1.0.1 or the python3.3 builtin unittest.mock. - fix issue127 - improve documentation for pytest_addoption() and add a ``config.getoption(name)`` helper function for consistency. v2.3.2 ----------------------------------- - fix issue208 and fix issue29 use new py version to avoid long pauses when printing tracebacks in long modules - fix issue205 - conftests in subdirs customizing pytest_pycollect_makemodule and pytest_pycollect_makeitem now work properly - fix teardown-ordering for parametrized setups - fix issue127 - better documentation for pytest_addoption and related objects. 
- fix unittest behaviour: TestCase.runtest only called if there are test methods defined - improve trial support: don't collect its empty unittest.TestCase.runTest() method - "python setup.py test" now works with pytest itself - fix/improve internal/packaging related bits: - exception message check of test_nose.py now passes on python33 as well - issue206 - fix test_assertrewrite.py to work when a global PYTHONDONTWRITEBYTECODE=1 is present - add tox.ini to pytest distribution so that ignore-dirs and others config bits are properly distributed for maintainers who run pytest-own tests v2.3.1 ----------------------------------- - fix issue202 - fix regression: using "self" from fixture functions now works as expected (it's the same "self" instance that a test method which uses the fixture sees) - skip pexpect using tests (test_pdb.py mostly) on freebsd* systems due to pexpect not supporting it properly (hanging) - link to web pages from --markers output which provides help for pytest.mark.* usage. v2.3.0 ----------------------------------- - fix issue202 - better automatic names for parametrized test functions - fix issue139 - introduce @pytest.fixture which allows direct scoping and parametrization of funcarg factories. 
- fix issue198 - conftest fixtures were not found on windows32 in some circumstances with nested directory structures due to path manipulation issues - fix issue193 skip test functions with were parametrized with empty parameter sets - fix python3.3 compat, mostly reporting bits that previously depended on dict ordering - introduce re-ordering of tests by resource and parametrization setup which takes precedence to the usual file-ordering - fix issue185 monkeypatching time.time does not cause pytest to fail - fix issue172 duplicate call of pytest.fixture decoratored setup_module functions - fix junitxml=path construction so that if tests change the current working directory and the path is a relative path it is constructed correctly from the original current working dir. - fix "python setup.py test" example to cause a proper "errno" return - fix issue165 - fix broken doc links and mention stackoverflow for FAQ - catch unicode-issues when writing failure representations to terminal to prevent the whole session from crashing - fix xfail/skip confusion: a skip-mark or an imperative pytest.skip will now take precedence before xfail-markers because we can't determine xfail/xpass status in case of a skip. see also: http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get - always report installed 3rd party plugins in the header of a test run - fix issue160: a failing setup of an xfail-marked tests should be reported as xfail (not xpass) - fix issue128: show captured output when capsys/capfd are used - fix issue179: propperly show the dependency chain of factories - pluginmanager.register(...) now raises ValueError if the plugin has been already registered or the name is taken - fix issue159: improve http://pytest.org/latest/faq.html especially with respect to the "magic" history, also mention pytest-django, trial and unittest integration. - make request.keywords and node.keywords writable. 
All descendant collection nodes will see keyword values. Keywords are dictionaries containing markers and other info. - fix issue 178: xml binary escapes are now wrapped in py.xml.raw - fix issue 176: correctly catch the builtin AssertionError even when we replaced AssertionError with a subclass on the python level - factory discovery no longer fails with magic global callables that provide no sane __code__ object (mock.call for example) - fix issue 182: testdir.inprocess_run now considers passed plugins - fix issue 188: ensure sys.exc_info is clear on python2 before calling into a test - fix issue 191: add unittest TestCase runTest method support - fix issue 156: monkeypatch correctly handles class level descriptors - reporting refinements: - pytest_report_header now receives a "startdir" so that you can use startdir.bestrelpath(yourpath) to show nice relative path - allow plugins to implement both pytest_report_header and pytest_sessionstart (sessionstart is invoked first). - don't show deselected reason line if there is none - py.test -vv will show all of assert comparisations instead of truncating v2.2.4 ----------------------------------- - fix error message for rewritten assertions involving the % operator - fix issue 126: correctly match all invalid xml characters for junitxml binary escape - fix issue with unittest: now @unittest.expectedFailure markers should be processed correctly (you can also use @pytest.mark markers) - document integration with the extended distribute/setuptools test commands - fix issue 140: propperly get the real functions of bound classmethods for setup/teardown_class - fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net - fix issue #143: call unconfigure/sessionfinish always when configure/sessionstart where called - fix issue #144: better mangle test ids to junitxml classnames - upgrade distribute_setup.py to 0.6.27 v2.2.3 ---------------------------------------- - fix uploaded package to only include neccesary 
files v2.2.2 ---------------------------------------- - fix issue101: wrong args to unittest.TestCase test function now produce better output - fix issue102: report more useful errors and hints for when a test directory was renamed and some pyc/__pycache__ remain - fix issue106: allow parametrize to be applied multiple times e.g. from module, class and at function level. - fix issue107: actually perform session scope finalization - don't check in parametrize if indirect parameters are funcarg names - add chdir method to monkeypatch funcarg - fix crash resulting from calling monkeypatch undo a second time - fix issue115: make --collectonly robust against early failure (missing files/directories) - "-qq --collectonly" now shows only files and the number of tests in them - "-q --collectonly" now shows test ids - allow adding of attributes to test reports such that it also works with distributed testing (no upgrade of pytest-xdist needed) v2.2.1 ---------------------------------------- - fix issue99 (in pytest and py) internallerrors with resultlog now produce better output - fixed by normalizing pytest_internalerror input arguments. - fix issue97 / traceback issues (in pytest and py) improve traceback output in conjunction with jinja2 and cython which hack tracebacks - fix issue93 (in pytest and pytest-xdist) avoid "delayed teardowns": the final test in a test node will now run its teardown directly instead of waiting for the end of the session. Thanks Dave Hunt for the good reporting and feedback. The pytest_runtest_protocol as well as the pytest_runtest_teardown hooks now have "nextitem" available which will be None indicating the end of the test run. - fix collection crash due to unknown-source collected items, thanks to Ralf Schmitt (fixed by depending on a more recent pylib) v2.2.0 ---------------------------------------- - fix issue90: introduce eager tearing down of test items so that teardown function are called earlier. 
- add an all-powerful metafunc.parametrize function which allows to parametrize test function arguments in multiple steps and therefore from indepdenent plugins and palces. - add a @pytest.mark.parametrize helper which allows to easily call a test function with different argument values - Add examples to the "parametrize" example page, including a quick port of Test scenarios and the new parametrize function and decorator. - introduce registration for "pytest.mark.*" helpers via ini-files or through plugin hooks. Also introduce a "--strict" option which will treat unregistered markers as errors allowing to avoid typos and maintain a well described set of markers for your test suite. See exaples at http://pytest.org/latest/mark.html and its links. - issue50: introduce "-m marker" option to select tests based on markers (this is a stricter and more predictable version of '-k' in that "-m" only matches complete markers and has more obvious rules for and/or semantics. - new feature to help optimizing the speed of your tests: --durations=N option for displaying N slowest test calls and setup/teardown methods. 
- fix issue87: --pastebin now works with python3 - fix issue89: --pdb with unexpected exceptions in doctest work more sensibly - fix and cleanup pytest's own test suite to not leak FDs - fix issue83: link to generated funcarg list - fix issue74: pyarg module names are now checked against imp.find_module false positives - fix compatibility with twisted/trial-11.1.0 use cases - simplify Node.listchain - simplify junitxml output code by relying on py.xml - add support for skip properties on unittest classes and functions v2.1.3 ---------------------------------------- - fix issue79: assertion rewriting failed on some comparisons in boolops - correctly handle zero length arguments (a la pytest '') - fix issue67 / junitxml now contains correct test durations, thanks ronny - fix issue75 / skipping test failure on jython - fix issue77 / Allow assertrepr_compare hook to apply to a subset of tests v2.1.2 ---------------------------------------- - fix assertion rewriting on files with windows newlines on some Python versions - refine test discovery by package/module name (--pyargs), thanks Florian Mayer - fix issue69 / assertion rewriting fixed on some boolean operations - fix issue68 / packages now work with assertion rewriting - fix issue66: use different assertion rewriting caches when the -O option is passed - don't try assertion rewriting on Jython, use reinterp v2.1.1 ---------------------------------------------- - fix issue64 / pytest.set_trace now works within pytest_generate_tests hooks - fix issue60 / fix error conditions involving the creation of __pycache__ - fix issue63 / assertion rewriting on inserts involving strings containing '%' - fix assertion rewriting on calls with a ** arg - don't cache rewritten modules if bytecode generation is disabled - fix assertion rewriting in read-only directories - fix issue59: provide system-out/err tags for junitxml output - fix issue61: assertion rewriting on boolean operations with 3 or more operands - you can now build a 
man page with "cd doc ; make man" v2.1.0 ---------------------------------------------- - fix issue53 call nosestyle setup functions with correct ordering - fix issue58 and issue59: new assertion code fixes - merge Benjamin's assertionrewrite branch: now assertions for test modules on python 2.6 and above are done by rewriting the AST and saving the pyc file before the test module is imported. see doc/assert.txt for more info. - fix issue43: improve doctests with better traceback reporting on unexpected exceptions - fix issue47: timing output in junitxml for test cases is now correct - fix issue48: typo in MarkInfo repr leading to exception - fix issue49: avoid confusing error when initizaliation partially fails - fix issue44: env/username expansion for junitxml file path - show releaselevel information in test runs for pypy - reworked doc pages for better navigation and PDF generation - report KeyboardInterrupt even if interrupted during session startup - fix issue 35 - provide PDF doc version and download link from index page v2.0.3 ---------------------------------------------- - fix issue38: nicer tracebacks on calls to hooks, particularly early configure/sessionstart ones - fix missing skip reason/meta information in junitxml files, reported via http://lists.idyll.org/pipermail/testing-in-python/2011-March/003928.html - fix issue34: avoid collection failure with "test" prefixed classes deriving from object. - don't require zlib (and other libs) for genscript plugin without --genscript actually being used. - speed up skips (by not doing a full traceback represenation internally) - fix issue37: avoid invalid characters in junitxml's output v2.0.2 ---------------------------------------------- - tackle issue32 - speed up test runs of very quick test functions by reducing the relative overhead - fix issue30 - extended xfail/skipif handling and improved reporting. If you have a syntax error in your skip/xfail expressions you now get nice error reports. 
Also you can now access module globals from xfail/skipif expressions so that this for example works now:: import pytest import mymodule @pytest.mark.skipif("mymodule.__version__[0] == "1") def test_function(): pass This will not run the test function if the module's version string does not start with a "1". Note that specifying a string instead of a boolean expressions allows py.test to report meaningful information when summarizing a test run as to what conditions lead to skipping (or xfail-ing) tests. - fix issue28 - setup_method and pytest_generate_tests work together The setup_method fixture method now gets called also for test function invocations generated from the pytest_generate_tests hook. - fix issue27 - collectonly and keyword-selection (-k) now work together Also, if you do "py.test --collectonly -q" you now get a flat list of test ids that you can use to paste to the py.test commandline in order to execute a particular test. - fix issue25 avoid reported problems with --pdb and python3.2/encodings output - fix issue23 - tmpdir argument now works on Python3.2 and WindowsXP Starting with Python3.2 os.symlink may be supported. By requiring a newer py lib version the py.path.local() implementation acknowledges this. - fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and particular thanks to Laura Creighton who also revieved parts of the documentation. - fix slighly wrong output of verbose progress reporting for classes (thanks Amaury) - more precise (avoiding of) deprecation warnings for node.Class|Function accesses - avoid std unittest assertion helper code in tracebacks (thanks Ronny) v2.0.1 ---------------------------------------------- - refine and unify initial capturing so that it works nicely even if the logging module is used on an early-loaded conftest.py file or plugin. - allow to omit "()" in test ids to allow for uniform test ids as produced by Alfredo's nice pytest.vim plugin. 
- fix issue12 - show plugin versions with "--version" and "--traceconfig" and also document how to add extra information to reporting test header - fix issue17 (import-* reporting issue on python3) by requiring py>1.4.0 (1.4.1 is going to include it) - fix issue10 (numpy arrays truth checking) by refining assertion interpretation in py lib - fix issue15: make nose compatibility tests compatible with python3 (now that nose-1.0 supports python3) - remove somewhat surprising "same-conftest" detection because it ignores conftest.py when they appear in several subdirs. - improve assertions ("not in"), thanks Floris Bruynooghe - improve behaviour/warnings when running on top of "python -OO" (assertions and docstrings are turned off, leading to potential false positives) - introduce a pytest_cmdline_processargs(args) hook to allow dynamic computation of command line arguments. This fixes a regression because py.test prior to 2.0 allowed to set command line options from conftest.py files which so far pytest-2.0 only allowed from ini-files now. - fix issue7: assert failures in doctest modules. unexpected failures in doctests will not generally show nicer, i.e. within the doctest failing context. - fix issue9: setup/teardown functions for an xfail-marked test will report as xfail if they fail but report as normally passing (not xpassing) if they succeed. This only is true for "direct" setup/teardown invocations because teardown_class/ teardown_module cannot closely relate to a single test. - fix issue14: no logging errors at process exit - refinements to "collecting" output on non-ttys - refine internal plugin registration and --traceconfig output - introduce a mechanism to prevent/unregister plugins from the command line, see http://pytest.org/plugins.html#cmdunregister - activate resultlog plugin by default - fix regression wrt yielded tests which due to the collection-before-running semantics were not setup as with pytest 1.3.4. 
Note, however, that the recommended and much cleaner way to do test parametraization remains the "pytest_generate_tests" mechanism, see the docs. v2.0.0 ---------------------------------------------- - pytest-2.0 is now its own package and depends on pylib-2.0 - new ability: python -m pytest / python -m pytest.main ability - new python invcation: pytest.main(args, plugins) to load some custom plugins early. - try harder to run unittest test suites in a more compatible manner by deferring setup/teardown semantics to the unittest package. also work harder to run twisted/trial and Django tests which should now basically work by default. - introduce a new way to set config options via ini-style files, by default setup.cfg and tox.ini files are searched. The old ways (certain environment variables, dynamic conftest.py reading is removed). - add a new "-q" option which decreases verbosity and prints a more nose/unittest-style "dot" output. - fix issue135 - marks now work with unittest test cases as well - fix issue126 - introduce py.test.set_trace() to trace execution via PDB during the running of tests even if capturing is ongoing. - fix issue123 - new "python -m py.test" invocation for py.test (requires Python 2.5 or above) - fix issue124 - make reporting more resilient against tests opening files on filedescriptor 1 (stdout). - fix issue109 - sibling conftest.py files will not be loaded. (and Directory collectors cannot be customized anymore from a Directory's conftest.py - this needs to happen at least one level up). 
- introduce (customizable) assertion failure representations and enhance output on assertion failures for comparisons and other cases (Floris Bruynooghe) - nose-plugin: pass through type-signature failures in setup/teardown functions instead of not calling them (Ed Singleton) - remove py.test.collect.Directory (follows from a major refactoring and simplification of the collection process) - majorly reduce py.test core code, shift function/python testing to own plugin - fix issue88 (finding custom test nodes from command line arg) - refine 'tmpdir' creation, will now create basenames better associated with test names (thanks Ronny) - "xpass" (unexpected pass) tests don't cause exitcode!=0 - fix issue131 / issue60 - importing doctests in __init__ files used as namespace packages - fix issue93 stdout/stderr is captured while importing conftest.py - fix bug: unittest collected functions now also can have "pytestmark" applied at class/module level - add ability to use "class" level for cached_setup helper - fix strangeness: mark.* objects are now immutable, create new instances v1.3.4 ---------------------------------------------- - fix issue111: improve install documentation for windows - fix issue119: fix custom collectability of __init__.py as a module - fix issue116: --doctestmodules work with __init__.py files as well - fix issue115: unify internal exception passthrough/catching/GeneratorExit - fix issue118: new --tb=native for presenting cpython-standard exceptions v1.3.3 ---------------------------------------------- - fix issue113: assertion representation problem with triple-quoted strings (and possibly other cases) - make conftest loading detect that a conftest file with the same content was already loaded, avoids surprises in nested directory structures which can be produced e.g. by Hudson. It probably removes the need to use --confcutdir in most cases. 
- fix terminal coloring for win32 (thanks Michael Foord for reporting) - fix weirdness: make terminal width detection work on stdout instead of stdin (thanks Armin Ronacher for reporting) - remove trailing whitespace in all py/text distribution files v1.3.2 ---------------------------------------------- New features ++++++++++++++++++ - fix issue103: introduce py.test.raises as context manager, examples:: with py.test.raises(ZeroDivisionError): x = 0 1 / x with py.test.raises(RuntimeError) as excinfo: call_something() # you may do extra checks on excinfo.value|type|traceback here (thanks Ronny Pfannschmidt) - Funcarg factories can now dynamically apply a marker to a test invocation. This is for example useful if a factory provides parameters to a test which are expected-to-fail:: def pytest_funcarg__arg(request): request.applymarker(py.test.mark.xfail(reason="flaky config")) ... def test_function(arg): ... - improved error reporting on collection and import errors. This makes use of a more general mechanism, namely that for custom test item/collect nodes ``node.repr_failure(excinfo)`` is now uniformly called so that you can override it to return a string error representation of your choice which is going to be reported as a (red) string. - introduce '--junitprefix=STR' option to prepend a prefix to all reports in the junitxml file. Bug fixes / Maintenance ++++++++++++++++++++++++++ - make tests and the ``pytest_recwarn`` plugin in particular fully compatible to Python2.7 (if you use the ``recwarn`` funcarg warnings will be enabled so that you can properly check for their existence in a cross-python manner). - refine --pdb: ignore xfailed tests, unify its TB-reporting and don't display failures again at the end. 
- fix assertion interpretation with the ** operator (thanks Benjamin Peterson) - fix issue105 assignment on the same line as a failing assertion (thanks Benjamin Peterson) - fix issue104 proper escaping for test names in junitxml plugin (thanks anonymous) - fix issue57 -f|--looponfail to work with xpassing tests (thanks Ronny) - fix issue92 collectonly reporter and --pastebin (thanks Benjamin Peterson) - fix py.code.compile(source) to generate unique filenames - fix assertion re-interp problems on PyPy, by defering code compilation to the (overridable) Frame.eval class. (thanks Amaury Forgeot) - fix py.path.local.pyimport() to work with directories - streamline py.path.local.mkdtemp implementation and usage - don't print empty lines when showing junitxml-filename - add optional boolean ignore_errors parameter to py.path.local.remove - fix terminal writing on win32/python2.4 - py.process.cmdexec() now tries harder to return properly encoded unicode objects on all python versions - install plain py.test/py.which scripts also for Jython, this helps to get canonical script paths in virtualenv situations - make path.bestrelpath(path) return ".", note that when calling X.bestrelpath the assumption is that X is a directory. - make initial conftest discovery ignore "--" prefixed arguments - fix resultlog plugin when used in an multicpu/multihost xdist situation (thanks Jakub Gustak) - perform distributed testing related reporting in the xdist-plugin rather than having dist-related code in the generic py.test distribution - fix homedir detection on Windows - ship distribute_setup.py version 0.6.13 v1.3.1 --------------------------------------------- New features ++++++++++++++++++ - issue91: introduce new py.test.xfail(reason) helper to imperatively mark a test as expected to fail. Can be used from within setup and test functions. This is useful especially for parametrized tests when certain configurations are expected-to-fail. 
In this case the declarative approach with the @py.test.mark.xfail cannot be used as it would mark all configurations as xfail. - issue102: introduce new --maxfail=NUM option to stop test runs after NUM failures. This is a generalization of the '-x' or '--exitfirst' option which is now equivalent to '--maxfail=1'. Both '-x' and '--maxfail' will now also print a line near the end indicating the Interruption. - issue89: allow py.test.mark decorators to be used on classes (class decorators were introduced with python2.6) and also allow to have multiple markers applied at class/module level by specifying a list. - improve and refine letter reporting in the progress bar: . pass f failed test s skipped tests (reminder: use for dependency/platform mismatch only) x xfailed test (test that was expected to fail) X xpassed test (test that was expected to fail but passed) You can use any combination of 'fsxX' with the '-r' extended reporting option. The xfail/xpass results will show up as skipped tests in the junitxml output - which also fixes issue99. - make py.test.cmdline.main() return the exitstatus instead of raising SystemExit and also allow it to be called multiple times. This of course requires that your application and tests are properly teared down and don't have global state. Fixes / Maintenance ++++++++++++++++++++++ - improved traceback presentation: - improved and unified reporting for "--tb=short" option - Errors during test module imports are much shorter, (using --tb=short style) - raises shows shorter more relevant tracebacks - --fulltrace now more systematically makes traces longer / inhibits cutting - improve support for raises and other dynamically compiled code by manipulating python's linecache.cache instead of the previous rather hacky way of creating custom code objects. This makes it seemlessly work on Jython and PyPy where it previously didn't. 
- fix issue96: make capturing more resilient against Control-C interruptions (involved somewhat substantial refactoring to the underlying capturing functionality to avoid race conditions). - fix chaining of conditional skipif/xfail decorators - so it works now as expected to use multiple @py.test.mark.skipif(condition) decorators, including specific reporting which of the conditions lead to skipping. - fix issue95: late-import zlib so that it's not required for general py.test startup. - fix issue94: make reporting more robust against bogus source code (and internally be more careful when presenting unexpected byte sequences) v1.3.0 --------------------------------------------- - deprecate --report option in favour of a new shorter and easier to remember -r option: it takes a string argument consisting of any combination of 'xfsX' characters. They relate to the single chars you see during the dotted progress printing and will print an extra line per test at the end of the test run. This extra line indicates the exact position or test ID that you directly paste to the py.test cmdline in order to re-run a particular test. - allow external plugins to register new hooks via the new pytest_addhooks(pluginmanager) hook. The new release of the pytest-xdist plugin for distributed and looponfailing testing requires this feature. - add a new pytest_ignore_collect(path, config) hook to allow projects and plugins to define exclusion behaviour for their directory structure - for example you may define in a conftest.py this method:: def pytest_ignore_collect(path): return path.check(link=1) to prevent even a collection try of any tests in symlinked dirs. - new pytest_pycollect_makemodule(path, parent) hook for allowing customization of the Module collection object for a matching test module. 
- extend and refine xfail mechanism: ``@py.test.mark.xfail(run=False)`` do not run the decorated test ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries specifying ``--runxfail`` on command line virtually ignores xfail markers - expose (previously internal) commonly useful methods: py.io.get_terminal_width() -> return terminal width py.io.ansi_print(...) -> print colored/bold text on linux/win32 py.io.saferepr(obj) -> return limited representation string - expose test outcome related exceptions as py.test.skip.Exception, py.test.raises.Exception etc., useful mostly for plugins doing special outcome interpretation/tweaking - (issue85) fix junitxml plugin to handle tests with non-ascii output - fix/refine python3 compatibility (thanks Benjamin Peterson) - fixes for making the jython/win32 combination work, note however: jython2.5.1/win32 does not provide a command line launcher, see http://bugs.jython.org/issue1491 . See pylib install documentation for how to work around. 
- fixes for handling of unicode exception values and unprintable objects - (issue87) fix unboundlocal error in assertionold code - (issue86) improve documentation for looponfailing - refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method - ship distribute_setup.py version 0.6.10 - added links to the new capturelog and coverage plugins v1.2.0 --------------------------------------------- - refined usage and options for "py.cleanup":: py.cleanup # remove "*.pyc" and "*$py.class" (jython) files py.cleanup -e .swp -e .cache # also remove files with these extensions py.cleanup -s # remove "build" and "dist" directory next to setup.py files py.cleanup -d # also remove empty directories py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'" py.cleanup -n # dry run, only show what would be removed - add a new option "py.test --funcargs" which shows available funcargs and their help strings (docstrings on their respective factory function) for a given test path - display a short and concise traceback if a funcarg lookup fails - early-load "conftest.py" files in non-dot first-level sub directories. allows to conveniently keep and access test-related options in a ``test`` subdir and still add command line options. - fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value - fix issue78: always call python-level teardown functions even if the according setup failed. This includes refinements for calling setup_module/class functions which will now only be called once instead of the previous behaviour where they'd be called multiple times if they raise an exception (including a Skipped exception). Any exception will be re-corded and associated with all tests in the according module/class scope. 
- fix issue63: assume <40 columns to be a bogus terminal width, default to 80 - fix pdb debugging to be in the correct frame on raises-related errors - update apipkg.py to fix an issue where recursive imports might unnecessarily break importing - fix plugin links v1.1.1 --------------------------------------------- - moved dist/looponfailing from py.test core into a new separately released pytest-xdist plugin. - new junitxml plugin: --junitxml=path will generate a junit style xml file which is processable e.g. by the Hudson CI system. - new option: --genscript=path will generate a standalone py.test script which will not need any libraries installed. thanks to Ralf Schmitt. - new option: --ignore will prevent specified path from collection. Can be specified multiple times. - new option: --confcutdir=dir will make py.test only consider conftest files that are relative to the specified dir. - new funcarg: "pytestconfig" is the pytest config object for access to command line args and can now be easily used in a test. - install 'py.test' and `py.which` with a ``-$VERSION`` suffix to disambiguate between Python3, python2.X, Jython and PyPy installed versions. - new "pytestconfig" funcarg allows access to test config object - new "pytest_report_header" hook can return additional lines to be displayed at the header of a test run. - (experimental) allow "py.test path::name1::name2::..." for pointing to a test within a test collection directly. This might eventually evolve as a full substitute to "-k" specifications. - streamlined plugin loading: order is now as documented in customize.html: setuptools, ENV, commandline, conftest. 
also setuptools entry point names are turned to canonical namees ("pytest_*") - automatically skip tests that need 'capfd' but have no os.dup - allow pytest_generate_tests to be defined in classes as well - deprecate usage of 'disabled' attribute in favour of pytestmark - deprecate definition of Directory, Module, Class and Function nodes in conftest.py files. Use pytest collect hooks instead. - collection/item node specific runtest/collect hooks are only called exactly on matching conftest.py files, i.e. ones which are exactly below the filesystem path of an item - change: the first pytest_collect_directory hook to return something will now prevent further hooks to be called. - change: figleaf plugin now requires --figleaf to run. Also change its long command line options to be a bit shorter (see py.test -h). - change: pytest doctest plugin is now enabled by default and has a new option --doctest-glob to set a pattern for file matches. - change: remove internal py._* helper vars, only keep py._pydir - robustify capturing to survive if custom pytest_runtest_setup code failed and prevented the capturing setup code from running. - make py.test.* helpers provided by default plugins visible early - works transparently both for pydoc and for interactive sessions which will regularly see e.g. py.test.mark and py.test.importorskip. - simplify internal plugin manager machinery - simplify internal collection tree by introducing a RootCollector node - fix assert reinterpreation that sees a call containing "keyword=..." - fix issue66: invoke pytest_sessionstart and pytest_sessionfinish hooks on slaves during dist-testing, report module/session teardown hooks correctly. - fix issue65: properly handle dist-testing if no execnet/py lib installed remotely. 
- skip some install-tests if no execnet is available - fix docs, fix internal bin/ script generation v1.1.0 --------------------------------------------- - introduce automatic plugin registration via 'pytest11' entrypoints via setuptools' pkg_resources.iter_entry_points - fix py.test dist-testing to work with execnet >= 1.0.0b4 - re-introduce py.test.cmdline.main() for better backward compatibility - svn paths: fix a bug with path.check(versioned=True) for svn paths, allow '%' in svn paths, make svnwc.update() default to interactive mode like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction. - refine distributed tarball to contain test and no pyc files - try harder to have deprecation warnings for py.compat.* accesses report a correct location v1.0.2 --------------------------------------------- * adjust and improve docs * remove py.rest tool and internal namespace - it was never really advertised and can still be used with the old release if needed. If there is interest it could be revived into its own tool i guess. * fix issue48 and issue59: raise an Error if the module from an imported test file does not seem to come from the filepath - avoids "same-name" confusion that has been reported repeatedly * merged Ronny's nose-compatibility hacks: now nose-style setup_module() and setup() functions are supported * introduce generalized py.test.mark function marking * reshuffle / refine command line grouping * deprecate parser.addgroup in favour of getgroup which creates option group * add --report command line option that allows to control showing of skipped/xfailed sections * generalized skipping: a new way to mark python functions with skipif or xfail at function, class and modules level based on platform or sys-module attributes. 
* extend py.test.mark decorator to allow for positional args * introduce and test "py.cleanup -d" to remove empty directories * fix issue #59 - robustify unittest test collection * make bpython/help interaction work by adding an __all__ attribute to ApiModule, cleanup initpkg * use MIT license for pylib, add some contributors * remove py.execnet code and substitute all usages with 'execnet' proper * fix issue50 - cached_setup now caches more to expectations for test functions with multiple arguments. * merge Jarko's fixes, issue #45 and #46 * add the ability to specify a path for py.lookup to search in * fix a funcarg cached_setup bug probably only occuring in distributed testing and "module" scope with teardown. * many fixes and changes for making the code base python3 compatible, many thanks to Benjamin Peterson for helping with this. * consolidate builtins implementation to be compatible with >=2.3, add helpers to ease keeping 2 and 3k compatible code * deprecate py.compat.doctest|subprocess|textwrap|optparse * deprecate py.magic.autopath, remove py/magic directory * move pytest assertion handling to py/code and a pytest_assertion plugin, add "--no-assert" option, deprecate py.magic namespaces in favour of (less) py.code ones. * consolidate and cleanup py/code classes and files * cleanup py/misc, move tests to bin-for-dist * introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg * consolidate py.log implementation, remove old approach. * introduce py.io.TextIO and py.io.BytesIO for distinguishing between text/unicode and byte-streams (uses underlying standard lib io.* if available) * make py.unittest_convert helper script available which converts "unittest.py" style files into the simpler assert/direct-test-classes py.test/nosetests style. The script was written by Laura Creighton. 
* simplified internal localpath implementation v1.0.2 ------------------------------------------- * fixing packaging issues, triggered by fedora redhat packaging, also added doc, examples and contrib dirs to the tarball. * added a documentation link to the new django plugin. v1.0.1 ------------------------------------------- * added a 'pytest_nose' plugin which handles nose.SkipTest, nose-style function/method/generator setup/teardown and tries to report functions correctly. * capturing of unicode writes or encoded strings to sys.stdout/err work better, also terminalwriting was adapted and somewhat unified between windows and linux. * improved documentation layout and content a lot * added a "--help-config" option to show conftest.py / ENV-var names for all longopt cmdline options, and some special conftest.py variables. renamed 'conf_capture' conftest setting to 'option_capture' accordingly. * fix issue #27: better reporting on non-collectable items given on commandline (e.g. pyc files) * fix issue #33: added --version flag (thanks Benjamin Peterson) * fix issue #32: adding support for "incomplete" paths to wcpath.status() * "Test" prefixed classes are *not* collected by default anymore if they have an __init__ method * monkeypatch setenv() now accepts a "prepend" parameter * improved reporting of collection error tracebacks * simplified multicall mechanism and plugin architecture, renamed some internal methods and argnames v1.0.0 ------------------------------------------- * more terse reporting try to show filesystem path relatively to current dir * improve xfail output a bit v1.0.0b9 ------------------------------------------- * cleanly handle and report final teardown of test setup * fix svn-1.6 compat issue with py.path.svnwc().versioned() (thanks Wouter Vanden Hove) * setup/teardown or collection problems now show as ERRORs or with big "E"'s in the progress lines. they are reported and counted separately. 
* dist-testing: properly handle test items that get locally collected but cannot be collected on the remote side - often due to platform/dependency reasons * simplified py.test.mark API - see keyword plugin documentation * integrate better with logging: capturing now by default captures test functions and their immediate setup/teardown in a single stream * capsys and capfd funcargs now have a readouterr() and a close() method (underlyingly py.io.StdCapture/FD objects are used which grew a readouterr() method as well to return snapshots of captured out/err) * make assert-reinterpretation work better with comparisons not returning bools (reported with numpy from thanks maciej fijalkowski) * reworked per-test output capturing into the pytest_iocapture.py plugin and thus removed capturing code from config object * item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr) v1.0.0b8 ------------------------------------------- * pytest_unittest-plugin is now enabled by default * introduced pytest_keyboardinterrupt hook and refined pytest_sessionfinish hooked, added tests. * workaround a buggy logging module interaction ("closing already closed files"). Thanks to Sridhar Ratnakumar for triggering. * if plugins use "py.test.importorskip" for importing a dependency only a warning will be issued instead of exiting the testing process. * many improvements to docs: - refined funcargs doc , use the term "factory" instead of "provider" - added a new talk/tutorial doc page - better download page - better plugin docstrings - added new plugins page and automatic doc generation script * fixed teardown problem related to partially failing funcarg setups (thanks MrTopf for reporting), "pytest_runtest_teardown" is now always invoked even if the "pytest_runtest_setup" failed. * tweaked doctest output for docstrings in py modules, thanks Radomir. 
v1.0.0b7 ------------------------------------------- * renamed py.test.xfail back to py.test.mark.xfail to avoid two ways to decorate for xfail * re-added py.test.mark decorator for setting keywords on functions (it was actually documented so removing it was not nice) * remove scope-argument from request.addfinalizer() because request.cached_setup has the scope arg. TOOWTDI. * perform setup finalization before reporting failures * apply modified patches from Andreas Kloeckner to allow test functions to have no func_code (#22) and to make "-k" and function keywords work (#20) * apply patch from Daniel Peolzleithner (issue #23) * resolve issue #18, multiprocessing.Manager() and redirection clash * make __name__ == "__channelexec__" for remote_exec code v1.0.0b3 ------------------------------------------- * plugin classes are removed: one now defines hooks directly in conftest.py or global pytest_*.py files. * added new pytest_namespace(config) hook that allows to inject helpers directly to the py.test.* namespace. * documented and refined many hooks * added new style of generative tests via pytest_generate_tests hook that integrates well with function arguments. v1.0.0b1 ------------------------------------------- * introduced new "funcarg" setup method, see doc/test/funcarg.txt * introduced plugin architecuture and many new py.test plugins, see doc/test/plugins.txt * teardown_method is now guaranteed to get called after a test method has run. * new method: py.test.importorskip(mod,minversion) will either import or call py.test.skip() * completely revised internal py.test architecture * new py.process.ForkedFunc object allowing to fork execution of a function to a sub process and getting a result back. XXX lots of things missing here XXX v0.9.2 ------------------------------------------- * refined installation and metadata, created new setup.py, now based on setuptools/ez_setup (thanks to Ralf Schmitt for his support). 
* improved the way of making py.* scripts available in windows environments, they are now added to the Scripts directory as ".cmd" files. * py.path.svnwc.status() now is more complete and uses xml output from the 'svn' command if available (Guido Wesdorp) * fix for py.path.svn* to work with svn 1.5 (Chris Lamb) * fix path.relto(otherpath) method on windows to use normcase for checking if a path is relative. * py.test's traceback is better parseable from editors (follows the filenames:LINENO: MSG convention) (thanks to Osmo Salomaa) * fix to javascript-generation, "py.test --runbrowser" should work more reliably now * removed previously accidentally added py.test.broken and py.test.notimplemented helpers. * there now is a py.__version__ attribute v0.9.1 ------------------------------------------- This is a fairly complete list of v0.9.1, which can serve as a reference for developers. * allowing + signs in py.path.svn urls [39106] * fixed support for Failed exceptions without excinfo in py.test [39340] * added support for killing processes for Windows (as well as platforms that support os.kill) in py.misc.killproc [39655] * added setup/teardown for generative tests to py.test [40702] * added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739] * fixed problem with calling .remove() on wcpaths of non-versioned files in py.path [44248] * fixed some import and inheritance issues in py.test [41480, 44648, 44655] * fail to run greenlet tests when pypy is available, but without stackless [45294] * small fixes in rsession tests [45295] * fixed issue with 2.5 type representations in py.test [45483, 45484] * made that internal reporting issues displaying is done atomically in py.test [45518] * made that non-existing files are igored by the py.lookup script [45519] * improved exception name creation in py.test [45535] * made that less threads are used in execnet [merge in 45539] * removed lock required for atomical reporting issue displaying in py.test [45545] * 
removed globals from execnet [45541, 45547] * refactored cleanup mechanics, made that setDaemon is set to 1 to make atexit get called in 2.5 (py.execnet) [45548] * fixed bug in joining threads in py.execnet's servemain [45549] * refactored py.test.rsession tests to not rely on exact output format anymore [45646] * using repr() on test outcome [45647] * added 'Reason' classes for py.test.skip() [45648, 45649] * killed some unnecessary sanity check in py.test.collect [45655] * avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only usable by Administrators [45901] * added support for locking and non-recursive commits to py.path.svnwc [45994] * locking files in py.execnet to prevent CPython from segfaulting [46010] * added export() method to py.path.svnurl * fixed -d -x in py.test [47277] * fixed argument concatenation problem in py.path.svnwc [49423] * restore py.test behaviour that it exits with code 1 when there are failures [49974] * don't fail on html files that don't have an accompanying .txt file [50606] * fixed 'utestconvert.py < input' [50645] * small fix for code indentation in py.code.source [50755] * fix _docgen.py documentation building [51285] * improved checks for source representation of code blocks in py.test [51292] * added support for passing authentication to py.path.svn* objects [52000, 52001] * removed sorted() call for py.apigen tests in favour of [].sort() to support Python 2.3 [52481] pytest-2.5.1/_pytest/0000775000175000017500000000000012254002202014113 5ustar hpkhpk00000000000000pytest-2.5.1/_pytest/recwarn.py0000664000175000017500000000620312254002202016127 0ustar hpkhpk00000000000000""" recording warnings during test function execution. """ import py import sys def pytest_funcarg__recwarn(request): """Return a WarningsRecorder instance that provides these methods: * ``pop(category=None)``: return last warning matching the category. 
* ``clear()``: clear list of warnings See http://docs.python.org/library/warnings.html for information on warning categories. """ if sys.version_info >= (2,7): import warnings oldfilters = warnings.filters[:] warnings.simplefilter('default') def reset_filters(): warnings.filters[:] = oldfilters request.addfinalizer(reset_filters) wrec = WarningsRecorder() request.addfinalizer(wrec.finalize) return wrec def pytest_namespace(): return {'deprecated_call': deprecated_call} def deprecated_call(func, *args, **kwargs): """ assert that calling ``func(*args, **kwargs)`` triggers a DeprecationWarning. """ warningmodule = py.std.warnings l = [] oldwarn_explicit = getattr(warningmodule, 'warn_explicit') def warn_explicit(*args, **kwargs): l.append(args) oldwarn_explicit(*args, **kwargs) oldwarn = getattr(warningmodule, 'warn') def warn(*args, **kwargs): l.append(args) oldwarn(*args, **kwargs) warningmodule.warn_explicit = warn_explicit warningmodule.warn = warn try: ret = func(*args, **kwargs) finally: warningmodule.warn_explicit = warn_explicit warningmodule.warn = warn if not l: #print warningmodule __tracebackhide__ = True raise AssertionError("%r did not produce DeprecationWarning" %(func,)) return ret class RecordedWarning: def __init__(self, message, category, filename, lineno, line): self.message = message self.category = category self.filename = filename self.lineno = lineno self.line = line class WarningsRecorder: def __init__(self): warningmodule = py.std.warnings self.list = [] def showwarning(message, category, filename, lineno, line=0): self.list.append(RecordedWarning( message, category, filename, lineno, line)) try: self.old_showwarning(message, category, filename, lineno, line=line) except TypeError: # < python2.6 self.old_showwarning(message, category, filename, lineno) self.old_showwarning = warningmodule.showwarning warningmodule.showwarning = showwarning def pop(self, cls=Warning): """ pop the first recorded warning, raise exception if not exists.""" for i, 
w in enumerate(self.list): if issubclass(w.category, cls): return self.list.pop(i) __tracebackhide__ = True assert 0, "%r not found in %r" %(cls, self.list) #def resetregistry(self): # import warnings # warnings.onceregistry.clear() # warnings.__warningregistry__.clear() def clear(self): self.list[:] = [] def finalize(self): py.std.warnings.showwarning = self.old_showwarning pytest-2.5.1/_pytest/pdb.py0000664000175000017500000000663512254002202015244 0ustar hpkhpk00000000000000""" interactive debugging with PDB, the Python Debugger. """ import pytest, py import sys def pytest_addoption(parser): group = parser.getgroup("general") group._addoption('--pdb', action="store_true", dest="usepdb", default=False, help="start the interactive Python debugger on errors.") def pytest_namespace(): return {'set_trace': pytestPDB().set_trace} def pytest_configure(config): if config.getvalue("usepdb"): config.pluginmanager.register(PdbInvoke(), 'pdbinvoke') old_trace = py.std.pdb.set_trace def fin(): py.std.pdb.set_trace = old_trace py.std.pdb.set_trace = pytest.set_trace config._cleanup.append(fin) class pytestPDB: """ Pseudo PDB that defers to the real pdb. """ item = None collector = None def set_trace(self): """ invoke PDB set_trace debugging, dropping any IO capturing. 
""" frame = sys._getframe().f_back item = self.item or self.collector if item is not None: capman = item.config.pluginmanager.getplugin("capturemanager") out, err = capman.suspendcapture() if hasattr(item, 'outerr'): item.outerr = (item.outerr[0] + out, item.outerr[1] + err) tw = py.io.TerminalWriter() tw.line() tw.sep(">", "PDB set_trace (IO-capturing turned off)") py.std.pdb.Pdb().set_trace(frame) def pdbitem(item): pytestPDB.item = item pytest_runtest_setup = pytest_runtest_call = pytest_runtest_teardown = pdbitem @pytest.mark.tryfirst def pytest_make_collect_report(__multicall__, collector): try: pytestPDB.collector = collector return __multicall__.execute() finally: pytestPDB.collector = None def pytest_runtest_makereport(): pytestPDB.item = None class PdbInvoke: def pytest_exception_interact(self, node, call, report): return _enter_pdb(node, call.excinfo, report) def pytest_internalerror(self, excrepr, excinfo): for line in str(excrepr).split("\n"): sys.stderr.write("INTERNALERROR> %s\n" %line) sys.stderr.flush() tb = _postmortem_traceback(excinfo) post_mortem(tb) def _enter_pdb(node, excinfo, rep): # XXX we re-use the TerminalReporter's terminalwriter # because this seems to avoid some encoding related troubles # for not completely clear reasons. tw = node.config.pluginmanager.getplugin("terminalreporter")._tw tw.line() tw.sep(">", "traceback") rep.toterminal(tw) tw.sep(">", "entering PDB") tb = _postmortem_traceback(excinfo) post_mortem(tb) rep._pdbshown = True return rep def _postmortem_traceback(excinfo): # A doctest.UnexpectedException is not useful for post_mortem. 
# Use the underlying exception instead: if isinstance(excinfo.value, py.std.doctest.UnexpectedException): return excinfo.value.exc_info[2] else: return excinfo._excinfo[2] def _find_last_non_hidden_frame(stack): i = max(0, len(stack) - 1) while i and stack[i][0].f_locals.get("__tracebackhide__", False): i -= 1 return i def post_mortem(t): pdb = py.std.pdb class Pdb(pdb.Pdb): def get_stack(self, f, t): stack, i = pdb.Pdb.get_stack(self, f, t) if f is None: i = _find_last_non_hidden_frame(stack) return stack, i p = Pdb() p.reset() p.interaction(None, t) pytest-2.5.1/_pytest/pastebin.py0000664000175000017500000000502012254002202016267 0ustar hpkhpk00000000000000""" submit failure or test session information to a pastebin service. """ import py, sys class url: base = "http://bpaste.net" xmlrpc = base + "/xmlrpc/" show = base + "/show/" def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group._addoption('--pastebin', metavar="mode", action='store', dest="pastebin", default=None, choices=['failed', 'all'], help="send failed|all info to bpaste.net pastebin service.") def pytest_configure(__multicall__, config): import tempfile __multicall__.execute() if config.option.pastebin == "all": config._pastebinfile = tempfile.TemporaryFile('w+') tr = config.pluginmanager.getplugin('terminalreporter') oldwrite = tr._tw.write def tee_write(s, **kwargs): oldwrite(s, **kwargs) config._pastebinfile.write(str(s)) tr._tw.write = tee_write def pytest_unconfigure(config): if hasattr(config, '_pastebinfile'): config._pastebinfile.seek(0) sessionlog = config._pastebinfile.read() config._pastebinfile.close() del config._pastebinfile proxyid = getproxy().newPaste("python", sessionlog) pastebinurl = "%s%s" % (url.show, proxyid) sys.stderr.write("pastebin session-log: %s\n" % pastebinurl) tr = config.pluginmanager.getplugin('terminalreporter') del tr._tw.__dict__['write'] def getproxy(): if sys.version_info < (3, 0): from xmlrpclib import ServerProxy else: from 
xmlrpc.client import ServerProxy return ServerProxy(url.xmlrpc).pastes def pytest_terminal_summary(terminalreporter): if terminalreporter.config.option.pastebin != "failed": return tr = terminalreporter if 'failed' in tr.stats: terminalreporter.write_sep("=", "Sending information to Paste Service") if tr.config.option.debug: terminalreporter.write_line("xmlrpcurl: %s" %(url.xmlrpc,)) serverproxy = getproxy() for rep in terminalreporter.stats.get('failed'): try: msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc except AttributeError: msg = tr._getfailureheadline(rep) tw = py.io.TerminalWriter(stringio=True) rep.toterminal(tw) s = tw.stringio.getvalue() assert len(s) proxyid = serverproxy.newPaste("python", s) pastebinurl = "%s%s" % (url.show, proxyid) tr.write_line("%s --> %s" %(msg, pastebinurl)) pytest-2.5.1/_pytest/_argcomplete.py0000664000175000017500000000726012254002202017133 0ustar hpkhpk00000000000000 """allow bash-completion for argparse with argcomplete if installed needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail to find the magic string, so _ARGCOMPLETE env. var is never set, and this does not need special code. argcomplete does not support python 2.5 (although the changes for that are minor). Function try_argcomplete(parser) should be called directly before the call to ArgumentParser.parse_args(). The filescompleter is what you normally would use on the positional arguments specification, in order to get "dirname/" after "dirn" instead of the default "dirname ": optparser.add_argument(Config._file_or_dir, nargs='*' ).completer=filescompleter Other, application specific, completers should go in the file doing the add_argument calls as they need to be specified as .completer attributes as well. (If argcomplete is not installed, the function the attribute points to will not be used). 
SPEEDUP ======= The generic argcomplete script for bash-completion (/etc/bash_completion.d/python-argcomplete.sh ) uses a python program to determine startup script generated by pip. You can speed up completion somewhat by changing this script to include # PYTHON_ARGCOMPLETE_OK so the the python-argcomplete-check-easy-install-script does not need to be called to find the entry point of the code and see if that is marked with PYTHON_ARGCOMPLETE_OK INSTALL/DEBUGGING ================= To include this support in another application that has setup.py generated scripts: - add the line: # PYTHON_ARGCOMPLETE_OK near the top of the main python entry point - include in the file calling parse_args(): from _argcomplete import try_argcomplete, filescompleter , call try_argcomplete just before parse_args(), and optionally add filescompleter to the positional arguments' add_argument() If things do not work right away: - switch on argcomplete debugging with (also helpful when doing custom completers): export _ARC_DEBUG=1 - run: python-argcomplete-check-easy-install-script $(which appname) echo $? will echo 0 if the magic line has been found, 1 if not - sometimes it helps to find early on errors using: _ARGCOMPLETE=1 _ARC_DEBUG=1 appname which should throw a KeyError: 'COMPLINE' (which is properly set by the global argcomplete script). """ import sys import os from glob import glob class FastFilesCompleter: 'Fast file completer class' def __init__(self, directories=True): self.directories = directories def __call__(self, prefix, **kwargs): """only called on non option completions""" if os.path.sep in prefix[1:]: # prefix_dir = len(os.path.dirname(prefix) + os.path.sep) else: prefix_dir = 0 completion = [] globbed = [] if '*' not in prefix and '?' 
not in prefix: if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash globbed.extend(glob(prefix + '.*')) prefix += '*' globbed.extend(glob(prefix)) for x in sorted(globbed): if os.path.isdir(x): x += '/' # append stripping the prefix (like bash, not like compgen) completion.append(x[prefix_dir:]) return completion if os.environ.get('_ARGCOMPLETE'): # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format if sys.version_info[:2] < (2, 6): sys.exit(1) try: import argcomplete.completers except ImportError: sys.exit(-1) filescompleter = FastFilesCompleter() def try_argcomplete(parser): argcomplete.autocomplete(parser) else: def try_argcomplete(parser): pass filescompleter = None pytest-2.5.1/_pytest/python.py0000664000175000017500000022341112254002202016011 0ustar hpkhpk00000000000000""" Python test discovery, setup and run of test functions. """ import py import inspect import sys import pytest from _pytest.mark import MarkDecorator from py._code.code import TerminalRepr import _pytest cutdir = py.path.local(_pytest.__file__).dirpath() NoneType = type(None) NOTSET = object() callable = py.builtin.callable def getfslineno(obj): # xxx let decorators etc specify a sane ordering while hasattr(obj, "__wrapped__"): obj = obj.__wrapped__ if hasattr(obj, 'place_as'): obj = obj.place_as fslineno = py.code.getfslineno(obj) assert isinstance(fslineno[1], int), obj return fslineno def getimfunc(func): try: return func.__func__ except AttributeError: try: return func.im_func except AttributeError: return func class FixtureFunctionMarker: def __init__(self, scope, params, autouse=False, yieldctx=False, ids=None): self.scope = scope self.params = params self.autouse = autouse self.yieldctx = yieldctx self.ids = ids def __call__(self, function): if inspect.isclass(function): raise ValueError( "class fixtures not supported (may be in the future)") function._pytestfixturefunction = self return function def fixture(scope="function", params=None, 
autouse=False, ids=None): """ (return a) decorator to mark a fixture factory function. This decorator can be used (with or or without parameters) to define a fixture function. The name of the fixture function can later be referenced to cause its invocation ahead of running tests: test modules or classes can use the pytest.mark.usefixtures(fixturename) marker. Test functions can directly use fixture names as input arguments in which case the fixture instance returned from the fixture function will be injected. :arg scope: the scope for which this fixture is shared, one of "function" (default), "class", "module", "session". :arg params: an optional list of parameters which will cause multiple invocations of the fixture function and all of the tests using it. :arg autouse: if True, the fixture func is activated for all tests that can see it. If False (the default) then an explicit reference is needed to activate the fixture. :arg ids: list of string ids each corresponding to the params so that they are part of the test id. If no ids are provided they will be generated automatically from the params. """ if callable(scope) and params is None and autouse == False: # direct decoration return FixtureFunctionMarker( "function", params, autouse)(scope) if params is not None and not isinstance(params, (list, tuple)): params = list(params) return FixtureFunctionMarker(scope, params, autouse, ids=ids) def yield_fixture(scope="function", params=None, autouse=False, ids=None): """ (return a) decorator to mark a yield-fixture factory function (EXPERIMENTAL). This takes the same arguments as :py:func:`pytest.fixture` but expects a fixture function to use a ``yield`` instead of a ``return`` statement to provide a fixture. See http://pytest.org/en/latest/yieldfixture.html for more info. 
""" if callable(scope) and params is None and autouse == False: # direct decoration return FixtureFunctionMarker( "function", params, autouse, yieldctx=True)(scope) else: return FixtureFunctionMarker(scope, params, autouse, yieldctx=True, ids=ids) defaultfuncargprefixmarker = fixture() def pyobj_property(name): def get(self): node = self.getparent(getattr(pytest, name)) if node is not None: return node.obj doc = "python %s object this node was collected from (can be None)." % ( name.lower(),) return property(get, None, None, doc) def pytest_addoption(parser): group = parser.getgroup("general") group.addoption('--fixtures', '--funcargs', action="store_true", dest="showfixtures", default=False, help="show available fixtures, sorted by plugin appearance") parser.addini("usefixtures", type="args", default=[], help="list of default fixtures to be used with this project") parser.addini("python_files", type="args", default=('test_*.py', '*_test.py'), help="glob-style file patterns for Python test module discovery") parser.addini("python_classes", type="args", default=("Test",), help="prefixes for Python test class discovery") parser.addini("python_functions", type="args", default=("test",), help="prefixes for Python test function and method discovery") def pytest_cmdline_main(config): if config.option.showfixtures: showfixtures(config) return 0 def pytest_generate_tests(metafunc): try: markers = metafunc.function.parametrize except AttributeError: return for marker in markers: metafunc.parametrize(*marker.args, **marker.kwargs) def pytest_configure(config): config.addinivalue_line("markers", "parametrize(argnames, argvalues): call a test function multiple " "times passing in different arguments in turn. argvalues generally " "needs to be a list of values if argnames specifies only one name " "or a list of tuples of values if argnames specifies multiple names. 
" "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " "decorated test function, one with arg1=1 and another with arg1=2." "see http://pytest.org/latest/parametrize.html for more info and " "examples." ) config.addinivalue_line("markers", "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures " ) def pytest_sessionstart(session): session._fixturemanager = FixtureManager(session) @pytest.mark.trylast def pytest_namespace(): raises.Exception = pytest.fail.Exception return { 'fixture': fixture, 'yield_fixture': yield_fixture, 'raises' : raises, 'collect': { 'Module': Module, 'Class': Class, 'Instance': Instance, 'Function': Function, 'Generator': Generator, '_fillfuncargs': fillfixtures} } @fixture(scope="session") def pytestconfig(request): """ the pytest config object with access to command line opts.""" return request.config def pytest_pyfunc_call(__multicall__, pyfuncitem): if not __multicall__.execute(): testfunction = pyfuncitem.obj if pyfuncitem._isyieldedfunction(): testfunction(*pyfuncitem._args) else: funcargs = pyfuncitem.funcargs testargs = {} for arg in pyfuncitem._fixtureinfo.argnames: testargs[arg] = funcargs[arg] testfunction(**testargs) def pytest_collect_file(path, parent): ext = path.ext if ext == ".py": if not parent.session.isinitpath(path): for pat in parent.config.getini('python_files'): if path.fnmatch(pat): break else: return ihook = parent.session.gethookproxy(path) return ihook.pytest_pycollect_makemodule(path=path, parent=parent) def pytest_pycollect_makemodule(path, parent): return Module(path, parent) def pytest_pycollect_makeitem(__multicall__, collector, name, obj): res = __multicall__.execute() if res is not None: return res if inspect.isclass(obj): #if hasattr(collector.obj, 'unittest'): # return # we assume it's a mixin class for a TestCase derived one if collector.classnamefilter(name): Class = 
collector._getcustomclass("Class") return Class(name, parent=collector) elif collector.funcnamefilter(name) and hasattr(obj, '__call__') and \ getfixturemarker(obj) is None: if is_generator(obj): return Generator(name, parent=collector) else: return list(collector._genfunctions(name, obj)) def is_generator(func): try: return py.code.getrawcode(func).co_flags & 32 # generator function except AttributeError: # builtin functions have no bytecode # assume them to not be generators return False class PyobjContext(object): module = pyobj_property("Module") cls = pyobj_property("Class") instance = pyobj_property("Instance") class PyobjMixin(PyobjContext): def obj(): def fget(self): try: return self._obj except AttributeError: self._obj = obj = self._getobj() return obj def fset(self, value): self._obj = value return property(fget, fset, None, "underlying python object") obj = obj() def _getobj(self): return getattr(self.parent.obj, self.name) def getmodpath(self, stopatmodule=True, includemodule=False): """ return python path relative to the containing module. """ chain = self.listchain() chain.reverse() parts = [] for node in chain: if isinstance(node, Instance): continue name = node.name if isinstance(node, Module): assert name.endswith(".py") name = name[:-3] if stopatmodule: if includemodule: parts.append(name) break parts.append(name) parts.reverse() s = ".".join(parts) return s.replace(".[", "[") def _getfslineno(self): return getfslineno(self.obj) def reportinfo(self): # XXX caching? 
obj = self.obj if hasattr(obj, 'compat_co_firstlineno'): # nose compatibility fspath = sys.modules[obj.__module__].__file__ if fspath.endswith(".pyc"): fspath = fspath[:-1] lineno = obj.compat_co_firstlineno else: fspath, lineno = getfslineno(obj) modpath = self.getmodpath() assert isinstance(lineno, int) return fspath, lineno, modpath class PyCollector(PyobjMixin, pytest.Collector): def funcnamefilter(self, name): for prefix in self.config.getini("python_functions"): if name.startswith(prefix): return True def classnamefilter(self, name): for prefix in self.config.getini("python_classes"): if name.startswith(prefix): return True def collect(self): # NB. we avoid random getattrs and peek in the __dict__ instead # (XXX originally introduced from a PyPy need, still true?) dicts = [getattr(self.obj, '__dict__', {})] for basecls in inspect.getmro(self.obj.__class__): dicts.append(basecls.__dict__) seen = {} l = [] for dic in dicts: for name, obj in dic.items(): if name in seen: continue seen[name] = True res = self.makeitem(name, obj) if res is None: continue if not isinstance(res, list): res = [res] l.extend(res) l.sort(key=lambda item: item.reportinfo()[:2]) return l def makeitem(self, name, obj): #assert self.ihook.fspath == self.fspath, self return self.ihook.pytest_pycollect_makeitem( collector=self, name=name, obj=obj) def _genfunctions(self, name, funcobj): module = self.getparent(Module).obj clscol = self.getparent(Class) cls = clscol and clscol.obj or None transfer_markers(funcobj, cls, module) fm = self.session._fixturemanager fixtureinfo = fm.getfixtureinfo(self, funcobj, cls) metafunc = Metafunc(funcobj, fixtureinfo, self.config, cls=cls, module=module) gentesthook = self.config.hook.pytest_generate_tests extra = [module] if cls is not None: extra.append(cls()) plugins = self.getplugins() + extra gentesthook.pcall(plugins, metafunc=metafunc) Function = self._getcustomclass("Function") if not metafunc._calls: yield Function(name, parent=self) else: # add 
funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs add_funcarg_pseudo_fixture_def(self, metafunc, fm) for callspec in metafunc._calls: subname = "%s[%s]" %(name, callspec.id) yield Function(name=subname, parent=self, callspec=callspec, callobj=funcobj, keywords={callspec.id:True}) def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): # this function will transform all collected calls to a functions # if they use direct funcargs (i.e. direct parametrization) # because we want later test execution to be able to rely on # an existing FixtureDef structure for all arguments. # XXX we can probably avoid this algorithm if we modify CallSpec2 # to directly care for creating the fixturedefs within its methods. if not metafunc._calls[0].funcargs: return # this function call does not have direct parametrization # collect funcargs of all callspecs into a list of values arg2params = {} arg2scope = {} arg2fixturedefs = metafunc._arg2fixturedefs for param_index, callspec in enumerate(metafunc._calls): for argname, argvalue in callspec.funcargs.items(): arg2params.setdefault(argname, []).append(argvalue) if argname not in arg2scope: scopenum = callspec._arg2scopenum.get(argname, scopenum_function) arg2scope[argname] = scopes[scopenum] callspec.indices[argname] = param_index for argname in callspec.funcargs: assert argname not in callspec.params callspec.params.update(callspec.funcargs) callspec.funcargs.clear() # register artificial FixtureDef's so that later at test execution # time we can rely on a proper FixtureDef to exist for fixture setup. for argname, valuelist in arg2params.items(): # if we have a scope that is higher than function we need # to make sure we only ever create an according fixturedef on # a per-scope basis. We thus store and cache the fixturedef on the # node related to the scope. 
scope = arg2scope[argname] node = None if scope != "function": node = get_scope_node(collector, scope) if node is None: assert scope == "class" and isinstance(collector, Module) # use module-level collector for class-scope (for now) node = collector if node and argname in node._name2pseudofixturedef: arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] else: fixturedef = FixtureDef(fixturemanager, '', argname, get_direct_param_fixture_func, arg2scope[argname], valuelist, False, False) arg2fixturedefs[argname] = [fixturedef] if node is not None: node._name2pseudofixturedef[argname] = fixturedef def get_direct_param_fixture_func(request): return request.param class FuncFixtureInfo: def __init__(self, argnames, names_closure, name2fixturedefs): self.argnames = argnames self.names_closure = names_closure self.name2fixturedefs = name2fixturedefs def transfer_markers(funcobj, cls, mod): # XXX this should rather be code in the mark plugin or the mark # plugin should merge with the python plugin. for holder in (cls, mod): try: pytestmark = holder.pytestmark except AttributeError: continue if isinstance(pytestmark, list): for mark in pytestmark: mark(funcobj) else: pytestmark(funcobj) class Module(pytest.File, PyCollector): """ Collector for test classes and functions. 
""" def _getobj(self): return self._memoizedcall('_obj', self._importtestmodule) def collect(self): self.session._fixturemanager.parsefactories(self) return super(Module, self).collect() def _importtestmodule(self): # we assume we are only called once per module try: mod = self.fspath.pyimport(ensuresyspath=True) except SyntaxError: excinfo = py.code.ExceptionInfo() raise self.CollectError(excinfo.getrepr(style="short")) except self.fspath.ImportMismatchError: e = sys.exc_info()[1] raise self.CollectError( "import file mismatch:\n" "imported module %r has this __file__ attribute:\n" " %s\n" "which is not the same as the test file we want to collect:\n" " %s\n" "HINT: remove __pycache__ / .pyc files and/or use a " "unique basename for your test file modules" % e.args ) #print "imported test module", mod self.config.pluginmanager.consider_module(mod) return mod def setup(self): setup_module = xunitsetup(self.obj, "setUpModule") if setup_module is None: setup_module = xunitsetup(self.obj, "setup_module") if setup_module is not None: #XXX: nose compat hack, move to nose plugin # if it takes a positional arg, its probably a pytest style one # so we pass the current module object if inspect.getargspec(setup_module)[0]: setup_module(self.obj) else: setup_module() fin = getattr(self.obj, 'tearDownModule', None) if fin is None: fin = getattr(self.obj, 'teardown_module', None) if fin is not None: #XXX: nose compat hack, move to nose plugin # if it takes a positional arg, its probably a py.test style one # so we pass the current module object if inspect.getargspec(fin)[0]: finalizer = lambda: fin(self.obj) else: finalizer = fin self.addfinalizer(finalizer) class Class(PyCollector): """ Collector for test methods. 
""" def collect(self): if hasinit(self.obj): pytest.skip("class %s.%s with __init__ won't get collected" % ( self.obj.__module__, self.obj.__name__, )) return [self._getcustomclass("Instance")(name="()", parent=self)] def setup(self): setup_class = xunitsetup(self.obj, 'setup_class') if setup_class is not None: setup_class = getattr(setup_class, 'im_func', setup_class) setup_class = getattr(setup_class, '__func__', setup_class) setup_class(self.obj) fin_class = getattr(self.obj, 'teardown_class', None) if fin_class is not None: fin_class = getattr(fin_class, 'im_func', fin_class) fin_class = getattr(fin_class, '__func__', fin_class) self.addfinalizer(lambda: fin_class(self.obj)) class Instance(PyCollector): def _getobj(self): obj = self.parent.obj() return obj def collect(self): self.session._fixturemanager.parsefactories(self) return super(Instance, self).collect() def newinstance(self): self.obj = self._getobj() return self.obj class FunctionMixin(PyobjMixin): """ mixin for the code common to Function and Generator. """ def setup(self): """ perform setup for this test function. 
""" if hasattr(self, '_preservedparent'): obj = self._preservedparent elif isinstance(self.parent, Instance): obj = self.parent.newinstance() self.obj = self._getobj() else: obj = self.parent.obj if inspect.ismethod(self.obj): setup_name = 'setup_method' teardown_name = 'teardown_method' else: setup_name = 'setup_function' teardown_name = 'teardown_function' setup_func_or_method = xunitsetup(obj, setup_name) if setup_func_or_method is not None: setup_func_or_method(self.obj) fin = getattr(obj, teardown_name, None) if fin is not None: self.addfinalizer(lambda: fin(self.obj)) def _prunetraceback(self, excinfo): if hasattr(self, '_obj') and not self.config.option.fulltrace: code = py.code.Code(self.obj) path, firstlineno = code.path, code.firstlineno traceback = excinfo.traceback ntraceback = traceback.cut(path=path, firstlineno=firstlineno) if ntraceback == traceback: ntraceback = ntraceback.cut(path=path) if ntraceback == traceback: ntraceback = ntraceback.cut(excludepath=cutdir) excinfo.traceback = ntraceback.filter() def _repr_failure_py(self, excinfo, style="long"): if excinfo.errisinstance(pytest.fail.Exception): if not excinfo.value.pytrace: return str(excinfo.value) return super(FunctionMixin, self)._repr_failure_py(excinfo, style=style) def repr_failure(self, excinfo, outerr=None): assert outerr is None, "XXX outerr usage is deprecated" return self._repr_failure_py(excinfo, style=self.config.option.tbstyle) class Generator(FunctionMixin, PyCollector): def collect(self): # test generators are seen as collectors but they also # invoke setup/teardown on popular request # (induced by the common "test_*" naming shared with normal tests) self.session._setupstate.prepare(self) # see FunctionMixin.setup and test_setupstate_is_preserved_134 self._preservedparent = self.parent.obj l = [] seen = {} for i, x in enumerate(self.obj()): name, call, args = self.getcallargs(x) if not callable(call): raise TypeError("%r yielded non callable test %r" %(self.obj, call,)) if name 
is None: name = "[%d]" % i else: name = "['%s']" % name if name in seen: raise ValueError("%r generated tests with non-unique name %r" %(self, name)) seen[name] = True l.append(self.Function(name, self, args=args, callobj=call)) return l def getcallargs(self, obj): if not isinstance(obj, (tuple, list)): obj = (obj,) # explict naming if isinstance(obj[0], py.builtin._basestring): name = obj[0] obj = obj[1:] else: name = None call, args = obj[0], obj[1:] return name, call, args def hasinit(obj): init = getattr(obj, '__init__', None) if init: if init != object.__init__: return True def fillfixtures(function): """ fill missing funcargs for a test function. """ try: request = function._request except AttributeError: # XXX this special code path is only expected to execute # with the oejskit plugin. It uses classes with funcargs # and we thus have to work a bit to allow this. fm = function.session._fixturemanager fi = fm.getfixtureinfo(function.parent, function.obj, None) function._fixtureinfo = fi request = function._request = FixtureRequest(function) request._fillfixtures() # prune out funcargs for jstests newfuncargs = {} for name in fi.argnames: newfuncargs[name] = function.funcargs[name] function.funcargs = newfuncargs else: request._fillfixtures() _notexists = object() class CallSpec2(object): def __init__(self, metafunc): self.metafunc = metafunc self.funcargs = {} self._idlist = [] self.params = {} self._globalid = _notexists self._globalid_args = set() self._globalparam = _notexists self._arg2scopenum = {} # used for sorting parametrized resources self.keywords = {} self.indices = {} def copy(self, metafunc): cs = CallSpec2(self.metafunc) cs.funcargs.update(self.funcargs) cs.params.update(self.params) cs.keywords.update(self.keywords) cs.indices.update(self.indices) cs._arg2scopenum.update(self._arg2scopenum) cs._idlist = list(self._idlist) cs._globalid = self._globalid cs._globalid_args = self._globalid_args cs._globalparam = self._globalparam return cs def 
_checkargnotcontained(self, arg): if arg in self.params or arg in self.funcargs: raise ValueError("duplicate %r" %(arg,)) def getparam(self, name): try: return self.params[name] except KeyError: if self._globalparam is _notexists: raise ValueError(name) return self._globalparam @property def id(self): return "-".join(map(str, filter(None, self._idlist))) def setmulti(self, valtype, argnames, valset, id, keywords, scopenum, param_index): for arg,val in zip(argnames, valset): self._checkargnotcontained(arg) getattr(self, valtype)[arg] = val self.indices[arg] = param_index self._arg2scopenum[arg] = scopenum if val is _notexists: self._emptyparamspecified = True self._idlist.append(id) self.keywords.update(keywords) def setall(self, funcargs, id, param): for x in funcargs: self._checkargnotcontained(x) self.funcargs.update(funcargs) if id is not _notexists: self._idlist.append(id) if param is not _notexists: assert self._globalparam is _notexists self._globalparam = param for arg in funcargs: self._arg2scopenum[arg] = scopenum_function class FuncargnamesCompatAttr: """ helper class so that Metafunc, Function and FixtureRequest don't need to each define the "funcargnames" compatibility attribute. """ @property def funcargnames(self): """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" return self.fixturenames class Metafunc(FuncargnamesCompatAttr): def __init__(self, function, fixtureinfo, config, cls=None, module=None): self.config = config self.module = module self.function = function self.fixturenames = fixtureinfo.names_closure self._arg2fixturedefs = fixtureinfo.name2fixturedefs self.cls = cls self.module = module self._calls = [] self._ids = py.builtin.set() def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None): """ Add new invocations to the underlying test function using the list of argvalues for the given argnames. Parametrization is performed during the collection phase. 
        If you need to setup expensive resources see about setting
        indirect=True to do it rather at test setup time.

        :arg argnames: a comma-separated string denoting one or more argument
            names, or a list/tuple of argument strings.

        :arg argvalues: The list of argvalues determines how often a
            test is invoked with different argument values.  If only one
            argname was specified argvalues is a list of simple values.  If N
            argnames were specified, argvalues must be a list of N-tuples,
            where each tuple-element specifies a value for its respective
            argname.

        :arg indirect: if True each argvalue corresponding to an argname will
            be passed as request.param to its respective argname fixture
            function so that it can perform more expensive setups during the
            setup phase of a test rather than at collection time.

        :arg ids: list of string ids each corresponding to the argvalues so
            that they are part of the test id.  If no ids are provided they
            will be generated automatically from the argvalues.

        :arg scope: if specified it denotes the scope of the parameters.
            The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            to set a dynamic scope using test context or configuration.
        """
        # individual parametrized argument sets can be wrapped in a series
        # of markers in which case we unwrap the values and apply the mark
        # at Function init
        newkeywords = {}
        unwrapped_argvalues = []
        for i, argval in enumerate(argvalues):
            while isinstance(argval, MarkDecorator):
                newmark = MarkDecorator(argval.markname,
                                        argval.args[:-1], argval.kwargs)
                newmarks = newkeywords.setdefault(i, {})
                newmarks[newmark.markname] = newmark
                argval = argval.args[-1]
            unwrapped_argvalues.append(argval)
        argvalues = unwrapped_argvalues
        if not isinstance(argnames, (tuple, list)):
            argnames = [x.strip() for x in argnames.split(",") if x.strip()]
            if len(argnames) == 1:
                argvalues = [(val,) for val in argvalues]
        if not argvalues:
            # empty parameter set: generate a single call that will be
            # skipped at setup time (see Function.setup)
            argvalues = [(_notexists,) * len(argnames)]

        if scope is None:
            scope = "function"
        scopenum = scopes.index(scope)
        if not indirect:
            #XXX should we also check for the opposite case?
            for arg in argnames:
                if arg not in self.fixturenames:
                    raise ValueError("%r uses no fixture %r" %(
                                     self.function, arg))
        valtype = indirect and "params" or "funcargs"
        if ids and len(ids) != len(argvalues):
            raise ValueError('%d tests specified with %d ids' %(
                             len(argvalues), len(ids)))
        if not ids:
            ids = idmaker(argnames, argvalues)
        newcalls = []
        # cross-product: every existing callspec is extended by every
        # value set of this parametrize() invocation
        for callspec in self._calls or [CallSpec2(self)]:
            for param_index, valset in enumerate(argvalues):
                assert len(valset) == len(argnames)
                newcallspec = callspec.copy(self)
                newcallspec.setmulti(valtype, argnames, valset,
                                     ids[param_index],
                                     newkeywords.get(param_index, {}),
                                     scopenum, param_index)
                newcalls.append(newcallspec)
        self._calls = newcalls

    def addcall(self, funcargs=None, id=_notexists, param=_notexists):
        """ (deprecated, use parametrize) Add a new call to the underlying
        test function during the collection phase of a test run.  Note that
        request.addcall() is called during the test collection phase prior and
        independently to actual test execution.  You should only use addcall()
        if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking the test function. :arg id: used for reporting and identification purposes. If you don't supply an `id` an automatic unique id will be generated. :arg param: a parameter which will be exposed to a later fixture function invocation through the ``request.param`` attribute. """ assert funcargs is None or isinstance(funcargs, dict) if funcargs is not None: for name in funcargs: if name not in self.fixturenames: pytest.fail("funcarg %r not used in this function." % name) else: funcargs = {} if id is None: raise ValueError("id=None not allowed") if id is _notexists: id = len(self._calls) id = str(id) if id in self._ids: raise ValueError("duplicate id %r" % id) self._ids.add(id) cs = CallSpec2(self) cs.setall(funcargs, id, param) self._calls.append(cs) def idmaker(argnames, argvalues): idlist = [] for valindex, valset in enumerate(argvalues): this_id = [] for nameindex, val in enumerate(valset): if not isinstance(val, (float, int, str, bool, NoneType)): this_id.append(str(argnames[nameindex])+str(valindex)) else: this_id.append(str(val)) idlist.append("-".join(this_id)) return idlist def showfixtures(config): from _pytest.main import wrap_session return wrap_session(config, _showfixtures_main) def _showfixtures_main(config, session): session.perform_collect() curdir = py.path.local() if session.items: nodeid = session.items[0].nodeid else: part = session._initialparts[0] nodeid = "::".join(map(str, [curdir.bestrelpath(part[0])] + part[1:])) nodeid.replace(session.fspath.sep, "/") tw = py.io.TerminalWriter() verbose = config.getvalue("verbose") fm = session._fixturemanager available = [] for argname in fm._arg2fixturedefs: fixturedefs = fm.getfixturedefs(argname, nodeid) assert fixturedefs is not None if not fixturedefs: continue fixturedef = fixturedefs[-1] loc = getlocation(fixturedef.func, curdir) available.append((len(fixturedef.baseid), fixturedef.func.__module__, curdir.bestrelpath(loc), 
fixturedef.argname, fixturedef)) available.sort() currentmodule = None for baseid, module, bestrel, argname, fixturedef in available: if currentmodule != module: if not module.startswith("_pytest."): tw.line() tw.sep("-", "fixtures defined from %s" %(module,)) currentmodule = module if verbose <= 0 and argname[0] == "_": continue if verbose > 0: funcargspec = "%s -- %s" %(argname, bestrel,) else: funcargspec = argname tw.line(funcargspec, green=True) loc = getlocation(fixturedef.func, curdir) doc = fixturedef.func.__doc__ or "" if doc: for line in doc.split("\n"): tw.line(" " + line.strip()) else: tw.line(" %s: no docstring available" %(loc,), red=True) def getlocation(function, curdir): import inspect fn = py.path.local(inspect.getfile(function)) lineno = py.builtin._getcode(function).co_firstlineno if fn.relto(curdir): fn = fn.relto(curdir) return "%s:%d" %(fn, lineno+1) # builtin pytest.raises helper def raises(ExpectedException, *args, **kwargs): """ assert that a code block/function call raises @ExpectedException and raise a failure exception otherwise. This helper produces a ``py.code.ExceptionInfo()`` object. If using Python 2.5 or above, you may use this function as a context manager:: >>> with raises(ZeroDivisionError): ... 1/0 Or you can specify a callable by passing a to-be-called lambda:: >>> raises(ZeroDivisionError, lambda: 1/0) or you can specify an arbitrary callable with arguments:: >>> def f(x): return 1/x ... 
        >>> raises(ZeroDivisionError, f, 0)
        >>> raises(ZeroDivisionError, f, x=0)

    A third possibility is to use a string to be executed::

        >>> raises(ZeroDivisionError, "f(0)")
    """
    __tracebackhide__ = True
    if ExpectedException is AssertionError:
        # we want to catch a AssertionError
        # replace our subclass with the builtin one
        # see https://bitbucket.org/hpk42/pytest/issue/176/pytestraises
        from _pytest.assertion.util import BuiltinAssertionError as ExpectedException

    if not args:
        # context-manager usage
        return RaisesContext(ExpectedException)
    elif isinstance(args[0], str):
        # string form: compile and exec in the caller's frame
        code, = args
        assert isinstance(code, str)
        frame = sys._getframe(1)
        loc = frame.f_locals.copy()
        loc.update(kwargs)
        #print "raises frame scope: %r" % frame.f_locals
        try:
            code = py.code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn'T mean f_globals == f_locals something special?
            # this is destroyed here ...
        except ExpectedException:
            return py.code.ExceptionInfo()
    else:
        # callable form
        func = args[0]
        try:
            func(*args[1:], **kwargs)
        except ExpectedException:
            return py.code.ExceptionInfo()
    pytest.fail("DID NOT RAISE")


class RaisesContext(object):
    """ context manager backing ``pytest.raises(Exc)`` usage. """

    def __init__(self, ExpectedException):
        self.ExpectedException = ExpectedException
        self.excinfo = None

    def __enter__(self):
        # hand out an uninitialized ExceptionInfo; it is filled in
        # by __exit__ once the exception is known
        self.excinfo = object.__new__(py.code.ExceptionInfo)
        return self.excinfo

    def __exit__(self, *tp):
        __tracebackhide__ = True
        if tp[0] is None:
            pytest.fail("DID NOT RAISE")
        self.excinfo.__init__(tp)
        return issubclass(self.excinfo.type, self.ExpectedException)


#
#  the basic py.test Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
    """ a Function Item is responsible for setting up and executing a
    Python test function.
    """
    _genid = None

    def __init__(self, name, parent, args=None, config=None,
                 callspec=None, callobj=NOTSET, keywords=None, session=None):
        super(Function, self).__init__(name, parent, config=config,
                                       session=session)
        self._args = args
        if callobj is not NOTSET:
            self.obj = callobj

        # merge keywords from the function object itself, from the
        # callspec (parametrization markers) and explicit ones
        for name, val in (py.builtin._getfuncdict(self.obj) or {}).items():
            self.keywords[name] = val
        if callspec:
            for name, val in callspec.keywords.items():
                self.keywords[name] = val
        if keywords:
            for name, val in keywords.items():
                self.keywords[name] = val

        isyield = self._isyieldedfunction()
        self._fixtureinfo = fi = self.session._fixturemanager.getfixtureinfo(
            self.parent, self.obj, self.cls, funcargs=not isyield)
        self.fixturenames = fi.names_closure
        if callspec is not None:
            self.callspec = callspec
        self._initrequest()

    def _initrequest(self):
        self.funcargs = {}
        if self._isyieldedfunction():
            assert not hasattr(self, "callspec"), (
                "yielded functions (deprecated) cannot have funcargs")
        else:
            if hasattr(self, "callspec"):
                callspec = self.callspec
                assert not callspec.funcargs
                self._genid = callspec.id
                if hasattr(callspec, "param"):
                    self.param = callspec.param
        self._request = FixtureRequest(self)

    @property
    def function(self):
        "underlying python 'function' object"
        return getattr(self.obj, 'im_func', self.obj)

    def _getobj(self):
        name = self.name
        i = name.find("[")  # parametrization
        if i != -1:
            name = name[:i]
        return getattr(self.parent.obj, name)

    @property
    def _pyfuncitem(self):
        "(compatonly) for code expecting pytest-2.2 style request objects"
        return self

    def _isyieldedfunction(self):
        return getattr(self, "_args", None) is not None

    def runtest(self):
        """ execute the underlying test function. """
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)

    def setup(self):
        # check if parametrization happend with an empty list
        try:
            self.callspec._emptyparamspecified
        except AttributeError:
            pass
        else:
            fs, lineno = self._getfslineno()
            pytest.skip("got empty parameter set, function %s at %s:%d" %(
                self.function.__name__, fs, lineno))
        super(Function, self).setup()
        fillfixtures(self)


# mapping of request scope -> attribute names available in that scope,
# consumed by scopeproperty() below
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")


def scopeproperty(name=None, doc=None):
    # decorator factory producing a property that raises AttributeError
    # when accessed from a request scope it is not defined for
    def decoratescope(func):
        scopename = name or func.__name__
        def provide(self):
            if func.__name__ in scope2props[self.scope]:
                return func(self)
            raise AttributeError("%s not available in %s-scoped context" % (
                scopename, self.scope))
        return property(provide, None, None, func.__doc__)
    return decoratescope


class FixtureRequest(FuncargnamesCompatAttr):
    """ A request for a fixture from a test or fixture function.

    A request object gives access to the requesting test context
    and has an optional ``param`` attribute in case
    the fixture is parametrized indirectly.
""" def __init__(self, pyfuncitem): self._pyfuncitem = pyfuncitem #: fixture for which this request is being performed self.fixturename = None #: Scope string, one of "function", "cls", "module", "session" self.scope = "function" self._funcargs = {} self._fixturedefs = {} fixtureinfo = pyfuncitem._fixtureinfo self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() self._arg2index = {} self.fixturenames = fixtureinfo.names_closure self._fixturemanager = pyfuncitem.session._fixturemanager @property def node(self): """ underlying collection node (depends on current request scope)""" return self._getscopeitem(self.scope) def _getnextfixturedef(self, argname): fixturedefs = self._arg2fixturedefs.get(argname, None) if fixturedefs is None: # we arrive here because of a a dynamic call to # getfuncargvalue(argname) usage which was naturally # not known at parsing/collection time fixturedefs = self._fixturemanager.getfixturedefs( argname, self._pyfuncitem.parent.nodeid) self._arg2fixturedefs[argname] = fixturedefs # fixturedefs list is immutable so we maintain a decreasing index index = self._arg2index.get(argname, 0) - 1 if fixturedefs is None or (-index > len(fixturedefs)): raise FixtureLookupError(argname, self) self._arg2index[argname] = index return fixturedefs[index] @property def config(self): """ the pytest config object associated with this request. """ return self._pyfuncitem.config @scopeproperty() def function(self): """ test function object if the request has a per-function scope. """ return self._pyfuncitem.obj @scopeproperty("class") def cls(self): """ class (can be None) where the test function was collected. """ clscol = self._pyfuncitem.getparent(pytest.Class) if clscol: return clscol.obj @property def instance(self): """ instance (can be None) on which test function was collected. 
""" # unittest support hack, see _pytest.unittest.TestCaseFunction try: return self._pyfuncitem._testcase except AttributeError: function = getattr(self, "function", None) if function is not None: return py.builtin._getimself(function) @scopeproperty() def module(self): """ python module object where the test function was collected. """ return self._pyfuncitem.getparent(pytest.Module).obj @scopeproperty() def fspath(self): """ the file system path of the test module which collected this test. """ return self._pyfuncitem.fspath @property def keywords(self): """ keywords/markers dictionary for the underlying node. """ return self.node.keywords @property def session(self): """ pytest session object. """ return self._pyfuncitem.session def addfinalizer(self, finalizer): """add finalizer/teardown function to be called after the last test within the requesting test context finished execution. """ # XXX usually this method is shadowed by fixturedef specific ones self._addfinalizer(finalizer, scope=self.scope) def _addfinalizer(self, finalizer, scope): colitem = self._getscopeitem(scope) self._pyfuncitem.session._setupstate.addfinalizer( finalizer=finalizer, colitem=colitem) def applymarker(self, marker): """ Apply a marker to a single test function invocation. This method is useful if you don't want to have a keyword/marker on all function invocations. :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object created by a call to ``py.test.mark.NAME(...)``. """ try: self.node.keywords[marker.markname] = marker except AttributeError: raise ValueError(marker) def raiseerror(self, msg): """ raise a FixtureLookupError with the given message. 
""" raise self._fixturemanager.FixtureLookupError(None, self, msg) def _fillfixtures(self): item = self._pyfuncitem fixturenames = getattr(item, "fixturenames", self.fixturenames) for argname in fixturenames: if argname not in item.funcargs: item.funcargs[argname] = self.getfuncargvalue(argname) def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): """ (deprecated) Return a testing resource managed by ``setup`` & ``teardown`` calls. ``scope`` and ``extrakey`` determine when the ``teardown`` function will be called so that subsequent calls to ``setup`` would recreate the resource. With pytest-2.3 you often do not need ``cached_setup()`` as you can directly declare a scope on a fixture function and register a finalizer through ``request.addfinalizer()``. :arg teardown: function receiving a previously setup resource. :arg setup: a no-argument function creating a resource. :arg scope: a string value out of ``function``, ``class``, ``module`` or ``session`` indicating the caching lifecycle of the resource. :arg extrakey: added to internal caching key of (funcargname, scope). """ if not hasattr(self.config, '_setupcache'): self.config._setupcache = {} # XXX weakref? cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) cache = self.config._setupcache try: val = cache[cachekey] except KeyError: __tracebackhide__ = True if scopemismatch(self.scope, scope): raise ScopeMismatchError("You tried to access a %r scoped " "resource with a %r scoped request object" %( (scope, self.scope))) __tracebackhide__ = False val = setup() cache[cachekey] = val if teardown is not None: def finalizer(): del cache[cachekey] teardown(val) self._addfinalizer(finalizer, scope=scope) return val def getfuncargvalue(self, argname): """ Dynamically retrieve a named fixture function argument. As of pytest-2.3, it is easier and usually better to access other fixture values by stating it as an input argument in the fixture function. 
If you only can decide about using another fixture at test setup time, you may use this function to retrieve it inside a fixture function body. """ return self._get_active_fixturedef(argname).cached_result[0] def _get_active_fixturedef(self, argname): try: return self._fixturedefs[argname] except KeyError: try: fixturedef = self._getnextfixturedef(argname) except FixtureLookupError: if argname == "request": class PseudoFixtureDef: cached_result = (self, [0]) return PseudoFixtureDef raise result = self._getfuncargvalue(fixturedef) self._funcargs[argname] = result self._fixturedefs[argname] = fixturedef return fixturedef def _get_fixturestack(self): current = self l = [] while 1: fixturedef = getattr(current, "_fixturedef", None) if fixturedef is None: l.reverse() return l l.append(fixturedef) current = current._parent_request def _getfuncargvalue(self, fixturedef): # prepare a subrequest object before calling fixture function # (latter managed by fixturedef) argname = fixturedef.argname funcitem = self._pyfuncitem scope = fixturedef.scope try: param = funcitem.callspec.getparam(argname) except (AttributeError, ValueError): param = NOTSET param_index = 0 else: # indices might not be set if old-style metafunc.addcall() was used param_index = funcitem.callspec.indices.get(argname, 0) # if a parametrize invocation set a scope it will override # the static scope defined with the fixture function paramscopenum = funcitem.callspec._arg2scopenum.get(argname) if paramscopenum is not None: scope = scopes[paramscopenum] subrequest = SubRequest(self, scope, param, param_index, fixturedef) # check if a higher-level scoped fixture accesses a lower level one if scope is not None: __tracebackhide__ = True if scopemismatch(self.scope, scope): # try to report something helpful lines = subrequest._factorytraceback() raise ScopeMismatchError("You tried to access the %r scoped " "fixture %r with a %r scoped request object, " "involved factories\n%s" %( (scope, argname, self.scope, 
"\n".join(lines)))) __tracebackhide__ = False try: # call the fixture function val = fixturedef.execute(request=subrequest) finally: # if fixture function failed it might have registered finalizers self.session._setupstate.addfinalizer(fixturedef.finish, subrequest.node) return val def _factorytraceback(self): lines = [] for fixturedef in self._get_fixturestack(): factory = fixturedef.func fs, lineno = getfslineno(factory) p = self._pyfuncitem.session.fspath.bestrelpath(fs) args = inspect.formatargspec(*inspect.getargspec(factory)) lines.append("%s:%d: def %s%s" %( p, lineno, factory.__name__, args)) return lines def _getscopeitem(self, scope): if scope == "function": # this might also be a non-function Item despite its attribute name return self._pyfuncitem node = get_scope_node(self._pyfuncitem, scope) if node is None and scope == "class": # fallback to function item itself node = self._pyfuncitem assert node return node def __repr__(self): return "" %(self.node) class SubRequest(FixtureRequest): """ a sub request for handling getting a fixture from a test function/fixture. """ def __init__(self, request, scope, param, param_index, fixturedef): self._parent_request = request self.fixturename = fixturedef.argname if param is not NOTSET: self.param = param self.param_index = param_index self.scope = scope self._fixturedef = fixturedef self.addfinalizer = fixturedef.addfinalizer self._pyfuncitem = request._pyfuncitem self._funcargs = request._funcargs self._fixturedefs = request._fixturedefs self._arg2fixturedefs = request._arg2fixturedefs self._arg2index = request._arg2index self.fixturenames = request.fixturenames self._fixturemanager = request._fixturemanager def __repr__(self): return "" % (self.fixturename, self._pyfuncitem) class ScopeMismatchError(Exception): """ A fixture function tries to use a different fixture function which which has a lower scope (e.g. 
a Session one calls a function one) """ scopes = "session module class function".split() scopenum_function = scopes.index("function") def scopemismatch(currentscope, newscope): return scopes.index(newscope) > scopes.index(currentscope) class FixtureLookupError(LookupError): """ could not return a requested Fixture (missing or invalid). """ def __init__(self, argname, request, msg=None): self.argname = argname self.request = request self.fixturestack = request._get_fixturestack() self.msg = msg def formatrepr(self): tblines = [] addline = tblines.append stack = [self.request._pyfuncitem.obj] stack.extend(map(lambda x: x.func, self.fixturestack)) msg = self.msg if msg is not None: stack = stack[:-1] # the last fixture raise an error, let's present # it at the requesting side for function in stack: fspath, lineno = getfslineno(function) lines, _ = inspect.getsourcelines(function) addline("file %s, line %s" % (fspath, lineno+1)) for i, line in enumerate(lines): line = line.rstrip() addline(" " + line) if line.lstrip().startswith('def'): break if msg is None: fm = self.request._fixturemanager available = [] for name, fixturedef in fm._arg2fixturedefs.items(): parentid = self.request._pyfuncitem.parent.nodeid faclist = list(fm._matchfactories(fixturedef, parentid)) if faclist: available.append(name) msg = "fixture %r not found" % (self.argname,) msg += "\n available fixtures: %s" %(", ".join(available),) msg += "\n use 'py.test --fixtures [testpath]' for help on them." 
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) class FixtureLookupErrorRepr(TerminalRepr): def __init__(self, filename, firstlineno, tblines, errorstring, argname): self.tblines = tblines self.errorstring = errorstring self.filename = filename self.firstlineno = firstlineno self.argname = argname def toterminal(self, tw): #tw.line("FixtureLookupError: %s" %(self.argname), red=True) for tbline in self.tblines: tw.line(tbline.rstrip()) for line in self.errorstring.split("\n"): tw.line(" " + line.strip(), red=True) tw.line() tw.line("%s:%d" % (self.filename, self.firstlineno+1)) class FixtureManager: """ pytest fixtures definitions and information is stored and managed from this class. During collection fm.parsefactories() is called multiple times to parse fixture function definitions into FixtureDef objects and internal data structures. During collection of test functions, metafunc-mechanics instantiate a FuncFixtureInfo object which is cached per node/func-name. This FuncFixtureInfo object is later retrieved by Function nodes which themselves offer a fixturenames attribute. The FuncFixtureInfo object holds information about fixtures and FixtureDefs relevant for a particular function. An initial list of fixtures is assembled like this: - ini-defined usefixtures - autouse-marked fixtures along the collection chain up from the function - usefixtures markers at module/class/function level - test function funcargs Subsequently the funcfixtureinfo.fixturenames attribute is computed as the closure of the fixtures needed to setup the initial fixtures, i. e. fixtures needed by fixture functions themselves are appended to the fixturenames list. Upon the test-setup phases all fixturenames are instantiated, retrieved by a lookup of their FuncFixtureInfo. 
""" _argprefix = "pytest_funcarg__" FixtureLookupError = FixtureLookupError FixtureLookupErrorRepr = FixtureLookupErrorRepr def __init__(self, session): self.session = session self.config = session.config self._arg2fixturedefs = {} self._seenplugins = set() self._holderobjseen = set() self._arg2finish = {} self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] session.config.pluginmanager.register(self, "funcmanage") self._nodename2fixtureinfo = {} def getfixtureinfo(self, node, func, cls, funcargs=True): # node is the "collection node" for "func" key = (node, func) try: return self._nodename2fixtureinfo[key] except KeyError: pass if funcargs and not hasattr(node, "nofuncargs"): if cls is not None: startindex = 1 else: startindex = None argnames = getfuncargnames(func, startindex) else: argnames = () usefixtures = getattr(func, "usefixtures", None) initialnames = argnames if usefixtures is not None: initialnames = usefixtures.args + initialnames fm = node.session._fixturemanager names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames, node) fixtureinfo = FuncFixtureInfo(argnames, names_closure, arg2fixturedefs) self._nodename2fixtureinfo[key] = fixtureinfo return fixtureinfo ### XXX this hook should be called for historic events like pytest_configure ### so that we don't have to do the below pytest_configure hook def pytest_plugin_registered(self, plugin): if plugin in self._seenplugins: return nodeid = None try: p = py.path.local(plugin.__file__) except AttributeError: pass else: if p.basename.startswith("conftest.py"): nodeid = p.dirpath().relto(self.session.fspath) if p.sep != "/": nodeid = nodeid.replace(p.sep, "/") self.parsefactories(plugin, nodeid) self._seenplugins.add(plugin) @pytest.mark.tryfirst def pytest_configure(self, config): plugins = config.pluginmanager.getplugins() for plugin in plugins: self.pytest_plugin_registered(plugin) def _getautousenames(self, nodeid): """ return a tuple of fixture names to be used. 
""" autousenames = [] for baseid, basenames in self._nodeid_and_autousenames: if nodeid.startswith(baseid): if baseid: i = len(baseid) nextchar = nodeid[i:i+1] if nextchar and nextchar not in ":/": continue autousenames.extend(basenames) # make sure autousenames are sorted by scope, scopenum 0 is session autousenames.sort( key=lambda x: self._arg2fixturedefs[x][-1].scopenum) return autousenames def getfixtureclosure(self, fixturenames, parentnode): # collect the closure of all fixtures , starting with the given # fixturenames as the initial set. As we have to visit all # factory definitions anyway, we also return a arg2fixturedefs # mapping so that the caller can reuse it and does not have # to re-discover fixturedefs again for each fixturename # (discovering matching fixtures for a given name/node is expensive) parentid = parentnode.nodeid fixturenames_closure = self._getautousenames(parentid) def merge(otherlist): for arg in otherlist: if arg not in fixturenames_closure: fixturenames_closure.append(arg) merge(fixturenames) arg2fixturedefs = {} lastlen = -1 while lastlen != len(fixturenames_closure): lastlen = len(fixturenames_closure) for argname in fixturenames_closure: if argname in arg2fixturedefs: continue fixturedefs = self.getfixturedefs(argname, parentid) if fixturedefs: arg2fixturedefs[argname] = fixturedefs merge(fixturedefs[-1].argnames) return fixturenames_closure, arg2fixturedefs def pytest_generate_tests(self, metafunc): for argname in metafunc.fixturenames: faclist = metafunc._arg2fixturedefs.get(argname) if faclist is None: continue # will raise FixtureLookupError at setup time for fixturedef in faclist: if fixturedef.params is not None: metafunc.parametrize(argname, fixturedef.params, indirect=True, scope=fixturedef.scope, ids=fixturedef.ids) def pytest_collection_modifyitems(self, items): # separate parametrized setups items[:] = reorder_items(items) def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): if nodeid is not NOTSET: 
holderobj = node_or_obj else: holderobj = node_or_obj.obj nodeid = node_or_obj.nodeid if holderobj in self._holderobjseen: return self._holderobjseen.add(holderobj) autousenames = [] for name in dir(holderobj): obj = getattr(holderobj, name, None) if not callable(obj): continue # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style) # or are "@pytest.fixture" marked marker = getfixturemarker(obj) if marker is None: if not name.startswith(self._argprefix): continue marker = defaultfuncargprefixmarker name = name[len(self._argprefix):] elif not isinstance(marker, FixtureFunctionMarker): # magic globals with __getattr__ might have got us a wrong # fixture attribute continue else: assert not name.startswith(self._argprefix) fixturedef = FixtureDef(self, nodeid, name, obj, marker.scope, marker.params, yieldctx=marker.yieldctx, unittest=unittest, ids=marker.ids) faclist = self._arg2fixturedefs.setdefault(name, []) if fixturedef.has_location: faclist.append(fixturedef) else: # fixturedefs with no location are at the front # so this inserts the current fixturedef after the # existing fixturedefs from external plugins but # before the fixturedefs provided in conftests. 
i = len([f for f in faclist if not f.has_location]) faclist.insert(i, fixturedef) if marker.autouse: autousenames.append(name) if autousenames: self._nodeid_and_autousenames.append((nodeid or '', autousenames)) def getfixturedefs(self, argname, nodeid): try: fixturedefs = self._arg2fixturedefs[argname] except KeyError: return None else: return tuple(self._matchfactories(fixturedefs, nodeid)) def _matchfactories(self, fixturedefs, nodeid): for fixturedef in fixturedefs: if nodeid.startswith(fixturedef.baseid): yield fixturedef def fail_fixturefunc(fixturefunc, msg): fs, lineno = getfslineno(fixturefunc) location = "%s:%s" % (fs, lineno+1) source = py.code.Source(fixturefunc) pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) def call_fixture_func(fixturefunc, request, kwargs, yieldctx): if yieldctx: if not is_generator(fixturefunc): fail_fixturefunc(fixturefunc, msg="yield_fixture requires yield statement in function") iter = fixturefunc(**kwargs) next = getattr(iter, "__next__", None) if next is None: next = getattr(iter, "next") res = next() def teardown(): try: next() except StopIteration: pass else: fail_fixturefunc(fixturefunc, "yield_fixture function has more than one 'yield'") request.addfinalizer(teardown) else: if is_generator(fixturefunc): fail_fixturefunc(fixturefunc, msg="pytest.fixture functions cannot use ``yield``. " "Instead write and return an inner function/generator " "and let the consumer call and iterate over it.") res = fixturefunc(**kwargs) return res class FixtureDef: """ A container for a factory definition. 
""" def __init__(self, fixturemanager, baseid, argname, func, scope, params, yieldctx, unittest=False, ids=None): self._fixturemanager = fixturemanager self.baseid = baseid or '' self.has_location = baseid is not None self.func = func self.argname = argname self.scope = scope self.scopenum = scopes.index(scope or "function") self.params = params startindex = unittest and 1 or None self.argnames = getfuncargnames(func, startindex=startindex) self.yieldctx = yieldctx self.unittest = unittest self.ids = ids self._finalizer = [] def addfinalizer(self, finalizer): self._finalizer.append(finalizer) def finish(self): while self._finalizer: func = self._finalizer.pop() func() try: del self.cached_result except AttributeError: pass def execute(self, request): # get required arguments and register our own finish() # with their finalization kwargs = {} for argname in self.argnames: fixturedef = request._get_active_fixturedef(argname) result, arg_cache_key = fixturedef.cached_result kwargs[argname] = result if argname != "request": fixturedef.addfinalizer(self.finish) my_cache_key = request.param_index cached_result = getattr(self, "cached_result", None) if cached_result is not None: #print argname, "Found cached_result", cached_result #print argname, "param_index", param_index result, cache_key = cached_result if my_cache_key == cache_key: #print request.fixturename, "CACHE HIT", repr(my_cache_key) return result #print request.fixturename, "CACHE MISS" # we have a previous but differently parametrized fixture instance # so we need to tear it down before creating a new one self.finish() assert not hasattr(self, "cached_result") if self.unittest: result = self.func(request.instance, **kwargs) else: fixturefunc = self.func # the fixture function needs to be bound to the actual # request.instance so that code working with "self" behaves # as expected. 
def getfuncargnames(function, startindex=None):
    """Return the tuple of argument names of *function*, dropping the
    first *startindex* names and any trailing names that have defaults.

    ``__wrapped__`` chains are followed to the innermost function; when
    *startindex* is None, ``self`` is skipped for bound methods.
    """
    # XXX merge with main.py's varnames
    unwrapped = function
    while hasattr(unwrapped, "__wrapped__"):
        unwrapped = unwrapped.__wrapped__
    if startindex is None:
        startindex = 1 if inspect.ismethod(function) else 0
    if unwrapped != function:
        # NOTE(review): ``patchings`` presumably comes from mock.patch
        # decorations, each of which injects one leading argument -- confirm
        startindex += len(getattr(function, "patchings", []))
        function = unwrapped
    argnames = inspect.getargs(py.code.getrawcode(function))[0]
    defaults = getattr(function, 'func_defaults',
                       getattr(function, '__defaults__', None)) or ()
    if defaults:
        return tuple(argnames[startindex:-len(defaults)])
    return tuple(argnames[startindex:])
def slice_items(items, ignore, scoped_argkeys_cache):
    """Split *items* around the first item using a not-yet-ignored fixture
    key of the current scope.

    Returns ``(items_before, items_same, items_other, newignore)`` where
    ``items_same`` holds the pivot item plus every later item sharing the
    pivot key, ``items_other`` the remaining later items, and ``newignore``
    is *ignore* extended with the pivot key.  When no such pivot exists,
    returns ``(items, None, None, None)``.
    """
    if scoped_argkeys_cache:  # do we need to do work at all?
        for idx, candidate in enumerate(items):
            keys = scoped_argkeys_cache.get(candidate)
            if keys is None:
                continue
            fresh = keys.difference(ignore)
            if not fresh:
                continue
            # found a slicing key
            pivot_key = fresh.pop()
            matching = [candidate]
            rest = []
            # partition the remainder of the list around the pivot key
            for other in items[idx + 1:]:
                otherkeys = scoped_argkeys_cache.get(other)
                if (otherkeys and pivot_key in otherkeys
                        and pivot_key not in ignore):
                    matching.append(other)
                else:
                    rest.append(other)
            updated_ignore = ignore.copy()
            updated_ignore.add(pivot_key)
            return items[:idx], matching, rest, updated_ignore
    return items, None, None, None
""" assert scopenum < scopenum_function # function try: cs = item.callspec except AttributeError: pass else: # cs.indictes.items() is random order of argnames but # then again different functions (items) can change order of # arguments so it doesn't matter much probably for argname, param_index in cs.indices.items(): if cs._arg2scopenum[argname] != scopenum: continue if scopenum == 0: # session key = (argname, param_index) elif scopenum == 1: # module key = (argname, param_index, item.fspath) elif scopenum == 2: # class key = (argname, param_index, item.fspath, item.cls) yield key def xunitsetup(obj, name): meth = getattr(obj, name, None) if getfixturemarker(meth) is None: return meth def getfixturemarker(obj): """ return fixturemarker or None if it doesn't exist or raised exceptions.""" try: return getattr(obj, "_pytestfixturefunction", None) except KeyboardInterrupt: raise except Exception: # some objects raise errors like request (from flask import request) # we don't expect them to be fixture functions return None scopename2class = { 'class': Class, 'module': Module, 'function': pytest.Item, } def get_scope_node(node, scope): cls = scopename2class.get(scope) if cls is None: if scope == "session": return node.session raise ValueError("unknown scope") return node.getparent(cls) pytest-2.5.1/_pytest/skipping.py0000664000175000017500000002311512254002202016313 0ustar hpkhpk00000000000000""" support for skip/xfail functions and markers. 
""" import py, pytest import sys def pytest_addoption(parser): group = parser.getgroup("general") group.addoption('--runxfail', action="store_true", dest="runxfail", default=False, help="run tests even if they are marked xfail") def pytest_configure(config): if config.option.runxfail: old = pytest.xfail config._cleanup.append(lambda: setattr(pytest, "xfail", old)) def nop(*args, **kwargs): pass nop.Exception = XFailed setattr(pytest, "xfail", nop) config.addinivalue_line("markers", "skipif(condition): skip the given test function if eval(condition) " "results in a True value. Evaluation happens within the " "module global context. Example: skipif('sys.platform == \"win32\"') " "skips the test if we are on the win32 platform. see " "http://pytest.org/latest/skipping.html" ) config.addinivalue_line("markers", "xfail(condition, reason=None, run=True): mark the the test function " "as an expected failure if eval(condition) has a True value. " "Optionally specify a reason for better reporting and run=False if " "you don't even want to execute the test function. 
See " "http://pytest.org/latest/skipping.html" ) def pytest_namespace(): return dict(xfail=xfail) class XFailed(pytest.fail.Exception): """ raised from an explicit call to py.test.xfail() """ def xfail(reason=""): """ xfail an executing test or setup functions with the given reason.""" __tracebackhide__ = True raise XFailed(reason) xfail.Exception = XFailed class MarkEvaluator: def __init__(self, item, name): self.item = item self.name = name @property def holder(self): return self.item.keywords.get(self.name, None) def __bool__(self): return bool(self.holder) __nonzero__ = __bool__ def wasvalid(self): return not hasattr(self, 'exc') def istrue(self): try: return self._istrue() except KeyboardInterrupt: raise except: self.exc = sys.exc_info() if isinstance(self.exc[1], SyntaxError): msg = [" " * (self.exc[1].offset + 4) + "^",] msg.append("SyntaxError: invalid syntax") else: msg = py.std.traceback.format_exception_only(*self.exc[:2]) pytest.fail("Error evaluating %r expression\n" " %s\n" "%s" %(self.name, self.expr, "\n".join(msg)), pytrace=False) def _getglobals(self): d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config} func = self.item.obj try: d.update(func.__globals__) except AttributeError: d.update(func.func_globals) return d def _istrue(self): if self.holder: d = self._getglobals() if self.holder.args: self.result = False for expr in self.holder.args: self.expr = expr if isinstance(expr, py.builtin._basestring): result = cached_eval(self.item.config, expr, d) else: if self.get("reason") is None: # XXX better be checked at collection time pytest.fail("you need to specify reason=STRING " "when using booleans as conditions.") result = bool(expr) if result: self.result = True self.expr = expr break else: self.result = True return getattr(self, 'result', False) def get(self, attr, default=None): return self.holder.kwargs.get(attr, default) def getexplanation(self): expl = self.get('reason', None) if not expl: if not hasattr(self, 'expr'): return 
"" else: return "condition: " + str(self.expr) return expl @pytest.mark.tryfirst def pytest_runtest_setup(item): if not isinstance(item, pytest.Function): return evalskip = MarkEvaluator(item, 'skipif') if evalskip.istrue(): py.test.skip(evalskip.getexplanation()) item._evalxfail = MarkEvaluator(item, 'xfail') check_xfail_no_run(item) def pytest_pyfunc_call(pyfuncitem): check_xfail_no_run(pyfuncitem) def check_xfail_no_run(item): if not item.config.option.runxfail: evalxfail = item._evalxfail if evalxfail.istrue(): if not evalxfail.get('run', True): py.test.xfail("[NOTRUN] " + evalxfail.getexplanation()) def pytest_runtest_makereport(__multicall__, item, call): if not isinstance(item, pytest.Function): return # unitttest special case, see setting of _unexpectedsuccess if hasattr(item, '_unexpectedsuccess'): rep = __multicall__.execute() if rep.when == "call": # we need to translate into how py.test encodes xpass rep.wasxfail = "reason: " + repr(item._unexpectedsuccess) rep.outcome = "failed" return rep if not (call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception)): evalxfail = getattr(item, '_evalxfail', None) if not evalxfail: return if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception): if not item.config.getvalue("runxfail"): rep = __multicall__.execute() rep.wasxfail = "reason: " + call.excinfo.value.msg rep.outcome = "skipped" return rep rep = __multicall__.execute() evalxfail = item._evalxfail if not rep.skipped: if not item.config.option.runxfail: if evalxfail.wasvalid() and evalxfail.istrue(): if call.excinfo: rep.outcome = "skipped" elif call.when == "call": rep.outcome = "failed" else: return rep rep.wasxfail = evalxfail.getexplanation() return rep return rep # called by terminalreporter progress reporting def pytest_report_teststatus(report): if hasattr(report, "wasxfail"): if report.skipped: return "xfailed", "x", "xfail" elif report.failed: return "xpassed", "X", "XPASS" # called by the terminalreporter instance/plugin 
def folded_skips(skipped):
    """Fold skip reports sharing the same (path, lineno, reason) longrepr
    into ``(count, path, lineno, reason)`` tuples, one per distinct
    location."""
    grouped = {}
    for report in skipped:
        location = report.longrepr
        assert len(location) == 3, (report, location)
        grouped.setdefault(location, []).append(report)
    return [(len(reports),) + location
            for location, reports in grouped.items()]
def pkgname(toplevel, rootpath, path):
    """Build the dotted module name for *path* relative to *rootpath*,
    rooted at the package name *toplevel*."""
    relative = path.parts()[len(rootpath.parts()):]
    segments = [toplevel]
    for part in relative:
        segments.append(part.purebasename)
    return '.'.join(segments)
given target path.") def pytest_cmdline_main(config): genscript = config.getvalue("genscript") if genscript: tw = py.io.TerminalWriter() deps = ['py', '_pytest', 'pytest'] if sys.version_info < (2,7): deps.append("argparse") tw.line("generated script will run on python2.5-python3.3++") else: tw.line("WARNING: generated script will not run on python2.6 " "or below due to 'argparse' dependency. Use python2.6 " "to generate a python2.5/6 compatible script", red=True) script = generate_script( 'import py; raise SystemExit(py.test.cmdline.main())', deps, ) genscript = py.path.local(genscript) genscript.write(script) tw.line("generated pytest standalone script: %s" % genscript, bold=True) return 0 pytest-2.5.1/_pytest/tmpdir.py0000664000175000017500000000470012254002202015765 0ustar hpkhpk00000000000000""" support for providing temporary directories to test functions. """ import pytest, py from _pytest.monkeypatch import monkeypatch class TempdirHandler: def __init__(self, config): self.config = config self.trace = config.trace.get("tmpdir") def ensuretemp(self, string, dir=1): """ (deprecated) return temporary directory path with the given string as the trailing part. It is usually better to use the 'tmpdir' function argument which provides an empty unique-per-test-invocation directory and is guaranteed to be empty. """ #py.log._apiwarn(">1.1", "use tmpdir function argument") return self.getbasetemp().ensure(string, dir=dir) def mktemp(self, basename, numbered=True): basetemp = self.getbasetemp() if not numbered: p = basetemp.mkdir(basename) else: p = py.path.local.make_numbered_dir(prefix=basename, keep=0, rootdir=basetemp, lock_timeout=None) self.trace("mktemp", p) return p def getbasetemp(self): """ return base temporary directory. 
""" try: return self._basetemp except AttributeError: basetemp = self.config.option.basetemp if basetemp: basetemp = py.path.local(basetemp) if basetemp.check(): basetemp.remove() basetemp.mkdir() else: basetemp = py.path.local.make_numbered_dir(prefix='pytest-') self._basetemp = t = basetemp.realpath() self.trace("new basetemp", t) return t def finish(self): self.trace("finish") def pytest_configure(config): mp = monkeypatch() t = TempdirHandler(config) config._cleanup.extend([mp.undo, t.finish]) mp.setattr(config, '_tmpdirhandler', t, raising=False) mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False) @pytest.fixture def tmpdir(request): """return a temporary directory path object which is unique to each test function invocation, created as a sub directory of the base temporary directory. The returned object is a `py.path.local`_ path object. """ name = request.node.name name = py.std.re.sub("[\W]", "_", name) MAXVAL = 30 if len(name) > MAXVAL: name = name[:MAXVAL] x = request.config._tmpdirhandler.mktemp(name, numbered=True) return x pytest-2.5.1/_pytest/mark.py0000664000175000017500000002250012254002202015416 0ustar hpkhpk00000000000000""" generic mechanism for marking and selecting python functions. """ import py def pytest_namespace(): return {'mark': MarkGenerator()} def pytest_addoption(parser): group = parser.getgroup("general") group._addoption( '-k', action="store", dest="keyword", default='', metavar="EXPRESSION", help="only run tests which match the given substring expression. " "An expression is a python evaluatable expression " "where all names are substring-matched against test names " "and their parent classes. Example: -k 'test_method or test " "other' matches all test functions and classes whose name " "contains 'test_method' or 'test_other'. " "Additionally keywords are matched to classes and functions " "containing extra names in their 'extra_keyword_matches' set, " "as well as functions which have names assigned directly to them." 
) group._addoption( "-m", action="store", dest="markexpr", default="", metavar="MARKEXPR", help="only run tests matching given mark expression. " "example: -m 'mark1 and not mark2'." ) group.addoption( "--markers", action="store_true", help="show markers (builtin, plugin and per-project ones)." ) parser.addini("markers", "markers for test functions", 'linelist') def pytest_cmdline_main(config): if config.option.markers: config.do_configure() tw = py.io.TerminalWriter() for line in config.getini("markers"): name, rest = line.split(":", 1) tw.write("@pytest.mark.%s:" % name, bold=True) tw.line(rest) tw.line() config.do_unconfigure() return 0 pytest_cmdline_main.tryfirst = True def pytest_collection_modifyitems(items, config): keywordexpr = config.option.keyword matchexpr = config.option.markexpr if not keywordexpr and not matchexpr: return selectuntil = False if keywordexpr[-1:] == ":": selectuntil = True keywordexpr = keywordexpr[:-1] remaining = [] deselected = [] for colitem in items: if keywordexpr and not matchkeyword(colitem, keywordexpr): deselected.append(colitem) else: if selectuntil: keywordexpr = None if matchexpr: if not matchmark(colitem, matchexpr): deselected.append(colitem) continue remaining.append(colitem) if deselected: config.hook.pytest_deselected(items=deselected) items[:] = remaining class MarkMapping: """Provides a local mapping for markers where item access resolves to True if the marker is present. """ def __init__(self, keywords): mymarks = set() for key, value in keywords.items(): if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator): mymarks.add(key) self._mymarks = mymarks def __getitem__(self, name): return name in self._mymarks class KeywordMapping: """Provides a local mapping for keywords. Given a list of names, map any substring of one of these names to True. 
""" def __init__(self, names): self._names = names def __getitem__(self, subname): for name in self._names: if subname in name: return True return False def matchmark(colitem, markexpr): """Tries to match on any marker names, attached to the given colitem.""" return eval(markexpr, {}, MarkMapping(colitem.keywords)) def matchkeyword(colitem, keywordexpr): """Tries to match given keyword expression to given collector item. Will match on the name of colitem, including the names of its parents. Only matches names of items which are either a :class:`Class` or a :class:`Function`. Additionally, matches on names in the 'extra_keyword_matches' set of any item, as well as names directly assigned to test functions. """ keywordexpr = keywordexpr.replace("-", "not ") mapped_names = set() # Add the names of the current item and any parent items import pytest for item in colitem.listchain(): if not isinstance(item, pytest.Instance): mapped_names.add(item.name) # Add the names added as extra keywords to current or parent items for name in colitem.listextrakeywords(): mapped_names.add(name) # Add the names attached to the current function through direct assignment if hasattr(colitem, 'function'): for name in colitem.function.__dict__: mapped_names.add(name) mapping = KeywordMapping(mapped_names) if " " not in keywordexpr: # special case to allow for simple "-k pass" and "-k 1.3" return mapping[keywordexpr] elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]: return not mapping[keywordexpr[4:]] return eval(keywordexpr, {}, mapping) def pytest_configure(config): import pytest if config.option.strict: pytest.mark._config = config class MarkGenerator: """ Factory for :class:`MarkDecorator` objects - exposed as a ``py.test.mark`` singleton instance. Example:: import py @py.test.mark.slowtest def test_function(): pass will set a 'slowtest' :class:`MarkInfo` object on the ``test_function`` object. 
""" def __getattr__(self, name): if name[0] == "_": raise AttributeError(name) if hasattr(self, '_config'): self._check(name) return MarkDecorator(name) def _check(self, name): try: if name in self._markers: return except AttributeError: pass self._markers = l = set() for line in self._config.getini("markers"): beginning = line.split(":", 1) x = beginning[0].split("(", 1)[0] l.add(x) if name not in self._markers: raise AttributeError("%r not a registered marker" % (name,)) def istestfunc(func): return hasattr(func, "__call__") and \ getattr(func, "__name__", "") != "" class MarkDecorator: """ A decorator for test functions and test classes. When applied it will create :class:`MarkInfo` objects which may be :ref:`retrieved by hooks as item keywords `. MarkDecorator instances are often created like this:: mark1 = py.test.mark.NAME # simple MarkDecorator mark2 = py.test.mark.NAME(name1=value) # parametrized MarkDecorator and can then be applied as decorators to test functions:: @mark2 def test_function(): pass """ def __init__(self, name, args=None, kwargs=None): self.name = name self.args = args or () self.kwargs = kwargs or {} @property def markname(self): return self.name # for backward-compat (2.4.1 had this attr) def __repr__(self): d = self.__dict__.copy() name = d.pop('name') return "" % (name, d) def __call__(self, *args, **kwargs): """ if passed a single callable argument: decorate it with mark info. otherwise add *args/**kwargs in-place to mark information. 
""" if args: func = args[0] if len(args) == 1 and (istestfunc(func) or hasattr(func, '__bases__')): if hasattr(func, '__bases__'): if hasattr(func, 'pytestmark'): l = func.pytestmark if not isinstance(l, list): func.pytestmark = [l, self] else: l.append(self) else: func.pytestmark = [self] else: holder = getattr(func, self.name, None) if holder is None: holder = MarkInfo( self.name, self.args, self.kwargs ) setattr(func, self.name, holder) else: holder.add(self.args, self.kwargs) return func kw = self.kwargs.copy() kw.update(kwargs) args = self.args + args return self.__class__(self.name, args=args, kwargs=kw) class MarkInfo: """ Marking object created by :class:`MarkDecorator` instances. """ def __init__(self, name, args, kwargs): #: name of attribute self.name = name #: positional argument list, empty if none specified self.args = args #: keyword argument dictionary, empty if nothing specified self.kwargs = kwargs self._arglist = [(args, kwargs.copy())] def __repr__(self): return "" % ( self.name, self.args, self.kwargs ) def add(self, args, kwargs): """ add a MarkInfo with the given args and kwargs. """ self._arglist.append((args, kwargs)) self.args += args self.kwargs.update(kwargs) def __iter__(self): """ yield MarkInfo objects each relating to a marking-call. 
""" for args, kwargs in self._arglist: yield MarkInfo(self.name, args, kwargs) pytest-2.5.1/_pytest/runner.py0000664000175000017500000003741212254002202016005 0ustar hpkhpk00000000000000""" basic collect and runtest protocol implementations """ import py, sys from time import time from py._code.code import TerminalRepr def pytest_namespace(): return { 'fail' : fail, 'skip' : skip, 'importorskip' : importorskip, 'exit' : exit, } # # pytest plugin hooks def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "reporting", after="general") group.addoption('--durations', action="store", type=int, default=None, metavar="N", help="show N slowest setup/test durations (N=0 for all)."), def pytest_terminal_summary(terminalreporter): durations = terminalreporter.config.option.durations if durations is None: return tr = terminalreporter dlist = [] for replist in tr.stats.values(): for rep in replist: if hasattr(rep, 'duration'): dlist.append(rep) if not dlist: return dlist.sort(key=lambda x: x.duration) dlist.reverse() if not durations: tr.write_sep("=", "slowest test durations") else: tr.write_sep("=", "slowest %s test durations" % durations) dlist = dlist[:durations] for rep in dlist: nodeid = rep.nodeid.replace("::()::", "::") tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, nodeid)) def pytest_sessionstart(session): session._setupstate = SetupState() def pytest_sessionfinish(session): session._setupstate.teardown_all() class NodeInfo: def __init__(self, location): self.location = location def pytest_runtest_protocol(item, nextitem): item.ihook.pytest_runtest_logstart( nodeid=item.nodeid, location=item.location, ) runtestprotocol(item, nextitem=nextitem) return True def runtestprotocol(item, log=True, nextitem=None): hasrequest = hasattr(item, "_request") if hasrequest and not item._request: item._initrequest() rep = call_and_report(item, "setup", log) reports = [rep] if rep.passed: reports.append(call_and_report(item, "call", log)) 
def pytest_report_teststatus(report):
    """Classify setup/teardown reports; call-phase reports are left to
    other plugins (returns None for them)."""
    if report.when not in ("setup", "teardown"):
        return None
    # category, shortletter, verbose-word
    if report.failed:
        return "error", "E", "ERROR"
    if report.skipped:
        return "skipped", "s", "SKIPPED"
    return "", "", ""
def getslaveinfoline(node):
    """Format (and cache on the node) a one-line description of a
    distributed-testing slave: id, platform, Python version, executable."""
    try:
        return node._slaveinfocache
    except AttributeError:
        pass
    info = node.slaveinfo
    version = "%s.%s.%s" % info['version_info'][:3]
    line = "[%s] %s -- Python %s %s" % (
        info['id'], info['sysplatform'], version, info['executable'])
    node._slaveinfocache = line
    return line
longrepr, when, duration=duration) class TestReport(BaseReport): """ Basic test report object (also used for setup and teardown calls if they fail). """ def __init__(self, nodeid, location, keywords, outcome, longrepr, when, sections=(), duration=0, **extra): #: normalized collection node id self.nodeid = nodeid #: a (filesystempath, lineno, domaininfo) tuple indicating the #: actual location of a test item - it might be different from the #: collected one e.g. if a method is inherited from a different module. self.location = location #: a name -> value dictionary containing all keywords and #: markers associated with a test invocation. self.keywords = keywords #: test outcome, always one of "passed", "failed", "skipped". self.outcome = outcome #: None or a failure representation. self.longrepr = longrepr #: one of 'setup', 'call', 'teardown' to indicate runtest phase. self.when = when #: list of (secname, data) extra information which needs to #: marshallable self.sections = list(sections) #: time it took to run just the test self.duration = duration self.__dict__.update(extra) def __repr__(self): return "" % ( self.nodeid, self.when, self.outcome) class TeardownErrorReport(BaseReport): outcome = "failed" when = "teardown" def __init__(self, longrepr, **extra): self.longrepr = longrepr self.sections = [] self.__dict__.update(extra) def pytest_make_collect_report(collector): call = CallInfo(collector._memocollect, "memocollect") longrepr = None if not call.excinfo: outcome = "passed" else: if call.excinfo.errisinstance(collector.skip_exceptions): outcome = "skipped" r = collector._repr_failure_py(call.excinfo, "line").reprcrash longrepr = (str(r.path), r.lineno, r.message) else: outcome = "failed" errorinfo = collector.repr_failure(call.excinfo) if not hasattr(errorinfo, "toterminal"): errorinfo = CollectErrorRepr(errorinfo) longrepr = errorinfo rep = CollectReport(collector.nodeid, outcome, longrepr, getattr(call, 'result', None)) rep.call = call # see 
collect_one_node return rep class CollectReport(BaseReport): def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra): self.nodeid = nodeid self.outcome = outcome self.longrepr = longrepr self.result = result or [] self.sections = list(sections) self.__dict__.update(extra) @property def location(self): return (self.fspath, None, self.fspath) def __repr__(self): return "" % ( self.nodeid, len(self.result), self.outcome) class CollectErrorRepr(TerminalRepr): def __init__(self, msg): self.longrepr = msg def toterminal(self, out): out.line(self.longrepr, red=True) class SetupState(object): """ shared state for setting up/tearing down test items or collectors. """ def __init__(self): self.stack = [] self._finalizers = {} def addfinalizer(self, finalizer, colitem): """ attach a finalizer to the given colitem. if colitem is None, this will add a finalizer that is called at the end of teardown_all(). if colitem is a tuple, it will be used as a key and needs an explicit call to _callfinalizers(key) later on. """ assert hasattr(finalizer, '__call__') #assert colitem in self.stack self._finalizers.setdefault(colitem, []).append(finalizer) def _pop_and_teardown(self): colitem = self.stack.pop() self._teardown_with_finalization(colitem) def _callfinalizers(self, colitem): finalizers = self._finalizers.pop(colitem, None) exc = None while finalizers: fin = finalizers.pop() try: fin() except Exception: # XXX Only first exception will be seen by user, # ideally all should be reported. 
if exc is None: exc = sys.exc_info() if exc: py.builtin._reraise(*exc) def _teardown_with_finalization(self, colitem): self._callfinalizers(colitem) if hasattr(colitem, "teardown"): colitem.teardown() for colitem in self._finalizers: assert colitem is None or colitem in self.stack \ or isinstance(colitem, tuple) def teardown_all(self): while self.stack: self._pop_and_teardown() for key in list(self._finalizers): self._teardown_with_finalization(key) assert not self._finalizers def teardown_exact(self, item, nextitem): needed_collectors = nextitem and nextitem.listchain() or [] self._teardown_towards(needed_collectors) def _teardown_towards(self, needed_collectors): while self.stack: if self.stack == needed_collectors[:len(self.stack)]: break self._pop_and_teardown() def prepare(self, colitem): """ setup objects along the collector chain to the test-method and teardown previously setup objects.""" needed_collectors = colitem.listchain() self._teardown_towards(needed_collectors) # check if the last collection node has raised an error for col in self.stack: if hasattr(col, '_prepare_exc'): py.builtin._reraise(*col._prepare_exc) for col in needed_collectors[len(self.stack):]: self.stack.append(col) try: col.setup() except Exception: col._prepare_exc = sys.exc_info() raise def collect_one_node(collector): ihook = collector.ihook ihook.pytest_collectstart(collector=collector) rep = ihook.pytest_make_collect_report(collector=collector) call = rep.__dict__.pop("call", None) if call and check_interactive_exception(call, rep): ihook.pytest_exception_interact(node=collector, call=call, report=rep) return rep # ============================================================= # Test OutcomeExceptions and helpers for creating them. class OutcomeException(Exception): """ OutcomeException and its subclass instances indicate and contain info about test and collection outcomes. 
""" def __init__(self, msg=None, pytrace=True): Exception.__init__(self, msg) self.msg = msg self.pytrace = pytrace def __repr__(self): if self.msg: return str(self.msg) return "<%s instance>" %(self.__class__.__name__,) __str__ = __repr__ class Skipped(OutcomeException): # XXX hackish: on 3k we fake to live in the builtins # in order to have Skipped exception printing shorter/nicer __module__ = 'builtins' class Failed(OutcomeException): """ raised from an explicit call to py.test.fail() """ __module__ = 'builtins' class Exit(KeyboardInterrupt): """ raised for immediate program exits (no tracebacks/summaries)""" def __init__(self, msg="unknown reason"): self.msg = msg KeyboardInterrupt.__init__(self, msg) # exposed helper methods def exit(msg): """ exit testing process as if KeyboardInterrupt was triggered. """ __tracebackhide__ = True raise Exit(msg) exit.Exception = Exit def skip(msg=""): """ skip an executing test with the given message. Note: it's usually better to use the py.test.mark.skipif marker to declare a test to be skipped under certain conditions like mismatching platforms or dependencies. See the pytest_skipping plugin for details. """ __tracebackhide__ = True raise Skipped(msg=msg) skip.Exception = Skipped def fail(msg="", pytrace=True): """ explicitely fail an currently-executing test with the given Message. :arg pytrace: if false the msg represents the full failure information and no python traceback will be reported. """ __tracebackhide__ = True raise Failed(msg=msg, pytrace=pytrace) fail.Exception = Failed def importorskip(modname, minversion=None): """ return imported module if it has at least "minversion" as its __version__ attribute. If no minversion is specified the a skip is only triggered if the module can not be imported. Note that version comparison only works with simple version strings like "1.2.3" but not "1.2.3.dev1" or others. 
""" __tracebackhide__ = True compile(modname, '', 'eval') # to catch syntaxerrors try: __import__(modname) except ImportError: skip("could not import %r" %(modname,)) mod = sys.modules[modname] if minversion is None: return mod verattr = getattr(mod, '__version__', None) def intver(verstring): return [int(x) for x in verstring.split(".")] if verattr is None or intver(verattr) < intver(minversion): skip("module %r has __version__ %r, required is: %r" %( modname, verattr, minversion)) return mod pytest-2.5.1/_pytest/nose.py0000664000175000017500000000514212254002202015433 0ustar hpkhpk00000000000000""" run test suites written for nose. """ import pytest, py import sys from _pytest import unittest def pytest_runtest_makereport(__multicall__, item, call): SkipTest = getattr(sys.modules.get('nose', None), 'SkipTest', None) if SkipTest: if call.excinfo and call.excinfo.errisinstance(SkipTest): # let's substitute the excinfo with a py.test.skip one call2 = call.__class__(lambda: pytest.skip(str(call.excinfo.value)), call.when) call.excinfo = call2.excinfo @pytest.mark.trylast def pytest_runtest_setup(item): if is_potential_nosetest(item): if isinstance(item.parent, pytest.Generator): gen = item.parent if not hasattr(gen, '_nosegensetup'): call_optional(gen.obj, 'setup') if isinstance(gen.parent, pytest.Instance): call_optional(gen.parent.obj, 'setup') gen._nosegensetup = True if not call_optional(item.obj, 'setup'): # call module level setup if there is no object level one call_optional(item.parent.obj, 'setup') #XXX this implies we only call teardown when setup worked item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) def teardown_nose(item): if is_potential_nosetest(item): if not call_optional(item.obj, 'teardown'): call_optional(item.parent.obj, 'teardown') #if hasattr(item.parent, '_nosegensetup'): # #call_optional(item._nosegensetup, 'teardown') # del item.parent._nosegensetup def pytest_make_collect_report(collector): SkipTest = 
getattr(sys.modules.get('unittest', None), 'SkipTest', None) if SkipTest is not None: collector.skip_exceptions += (SkipTest,) SkipTest = getattr(sys.modules.get('nose', None), 'SkipTest', None) if SkipTest is not None: collector.skip_exceptions += (SkipTest,) if isinstance(collector, pytest.Generator): call_optional(collector.obj, 'setup') def is_potential_nosetest(item): # extra check needed since we do not do nose style setup/teardown # on direct unittest style classes return isinstance(item, pytest.Function) and \ not isinstance(item, unittest.TestCaseFunction) def call_optional(obj, name): method = getattr(obj, name, None) isfixture = hasattr(method, "_pytestfixturefunction") if method is not None and not isfixture and py.builtin.callable(method): # If there's any problems allow the exception to raise rather than # silently ignoring them method() return True pytest-2.5.1/_pytest/config.py0000664000175000017500000007736712254002202015756 0ustar hpkhpk00000000000000""" command line options, ini-file and conftest.py processing. """ import py import sys, os from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager # pytest startup def main(args=None, plugins=None): """ return exit code, after performing an in-process test run. :arg args: list of command line arguments. :arg plugins: list of plugin objects to be auto-registered during initialization. 
""" config = _prepareconfig(args, plugins) return config.hook.pytest_cmdline_main(config=config) class cmdline: # compatibility namespace main = staticmethod(main) class UsageError(Exception): """ error in py.test usage or invocation""" _preinit = [] default_plugins = ( "mark main terminal runner python pdb unittest capture skipping " "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " "junitxml resultlog doctest").split() def _preloadplugins(): assert not _preinit _preinit.append(get_plugin_manager()) def get_plugin_manager(): if _preinit: return _preinit.pop(0) # subsequent calls to main will create a fresh instance pluginmanager = PytestPluginManager() pluginmanager.config = Config(pluginmanager) # XXX attr needed? for spec in default_plugins: pluginmanager.import_plugin(spec) return pluginmanager def _prepareconfig(args=None, plugins=None): if args is None: args = sys.argv[1:] elif isinstance(args, py.path.local): args = [str(args)] elif not isinstance(args, (tuple, list)): if not isinstance(args, str): raise ValueError("not a string or argument list: %r" % (args,)) args = py.std.shlex.split(args) pluginmanager = get_plugin_manager() if plugins: for plugin in plugins: pluginmanager.register(plugin) return pluginmanager.hook.pytest_cmdline_parse( pluginmanager=pluginmanager, args=args) class PytestPluginManager(PluginManager): def __init__(self, hookspecs=[hookspec]): super(PytestPluginManager, self).__init__(hookspecs=hookspecs) self.register(self) if os.environ.get('PYTEST_DEBUG'): err = sys.stderr encoding = getattr(err, 'encoding', 'utf8') try: err = py.io.dupfile(err, encoding=encoding) except Exception: pass self.trace.root.setwriter(err.write) def pytest_configure(self, config): config.addinivalue_line("markers", "tryfirst: mark a hook implementation function such that the " "plugin machinery will try to call it first/as early as possible.") config.addinivalue_line("markers", "trylast: mark a hook implementation function such that 
the " "plugin machinery will try to call it last/as late as possible.") class Parser: """ Parser for command line arguments and ini-file values. """ def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) self._groups = [] self._processopt = processopt self._usage = usage self._inidict = {} self._ininames = [] self.hints = [] def processoption(self, option): if self._processopt: if option.dest: self._processopt(option) def getgroup(self, name, description="", after=None): """ get (or create) a named option Group. :name: name of the option group. :description: long description for --help output. :after: name of other group, used for ordering --help output. The returned group object has an ``addoption`` method with the same signature as :py:func:`parser.addoption <_pytest.config.Parser.addoption>` but will be shown in the respective group in the output of ``pytest. --help``. """ for group in self._groups: if group.name == name: return group group = OptionGroup(name, description, parser=self) i = 0 for i, grp in enumerate(self._groups): if grp.name == after: break self._groups.insert(i+1, group) return group def addoption(self, *opts, **attrs): """ register a command line option. :opts: option names, can be short or long options. :attrs: same attributes which the ``add_option()`` function of the `argparse library `_ accepts. After command line parsing options are available on the pytest config object via ``config.option.NAME`` where ``NAME`` is usually set by passing a ``dest`` attribute, for example ``addoption("--long", dest="NAME", ...)``. 
""" self._anonymous.addoption(*opts, **attrs) def parse(self, args): from _pytest._argcomplete import try_argcomplete self.optparser = self._getparser() try_argcomplete(self.optparser) return self.optparser.parse_args([str(x) for x in args]) def _getparser(self): from _pytest._argcomplete import filescompleter optparser = MyOptionParser(self) groups = self._groups + [self._anonymous] for group in groups: if group.options: desc = group.description or group.name arggroup = optparser.add_argument_group(desc) for option in group.options: n = option.names() a = option.attrs() arggroup.add_argument(*n, **a) # bash like autocompletion for dirs (appending '/') optparser.add_argument(FILE_OR_DIR, nargs='*' ).completer=filescompleter return optparser def parse_setoption(self, args, option): parsedoption = self.parse(args) for name, value in parsedoption.__dict__.items(): setattr(option, name, value) return getattr(parsedoption, FILE_OR_DIR) def parse_known_args(self, args): optparser = self._getparser() args = [str(x) for x in args] return optparser.parse_known_args(args)[0] def addini(self, name, help, type=None, default=None): """ register an ini-file option. :name: name of the ini-variable :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``. :default: default value if no ini-file option exists but is queried. The value of ini-variables can be retrieved via a call to :py:func:`config.getini(name) <_pytest.config.Config.getini>`. """ assert type in (None, "pathlist", "args", "linelist") self._inidict[name] = (help, type, default) self._ininames.append(name) class ArgumentError(Exception): """ Raised if an Argument instance is created with invalid or inconsistent arguments. 
""" def __init__(self, msg, option): self.msg = msg self.option_id = str(option) def __str__(self): if self.option_id: return "option %s: %s" % (self.option_id, self.msg) else: return self.msg class Argument: """class that mimics the necessary behaviour of py.std.optparse.Option """ _typ_map = { 'int': int, 'string': str, } # enable after some grace period for plugin writers TYPE_WARN = False def __init__(self, *names, **attrs): """store parms in private vars for use in add_argument""" self._attrs = attrs self._short_opts = [] self._long_opts = [] self.dest = attrs.get('dest') if self.TYPE_WARN: try: help = attrs['help'] if '%default' in help: py.std.warnings.warn( 'py.test now uses argparse. "%default" should be' ' changed to "%(default)s" ', FutureWarning, stacklevel=3) except KeyError: pass try: typ = attrs['type'] except KeyError: pass else: # this might raise a keyerror as well, don't want to catch that if isinstance(typ, py.builtin._basestring): if typ == 'choice': if self.TYPE_WARN: py.std.warnings.warn( 'type argument to addoption() is a string %r.' ' For parsearg this is optional and when supplied ' ' should be a type.' ' (options: %s)' % (typ, names), FutureWarning, stacklevel=3) # argparse expects a type here take it from # the type of the first element attrs['type'] = type(attrs['choices'][0]) else: if self.TYPE_WARN: py.std.warnings.warn( 'type argument to addoption() is a string %r.' ' For parsearg this should be a type.' 
' (options: %s)' % (typ, names), FutureWarning, stacklevel=3) attrs['type'] = Argument._typ_map[typ] # used in test_parseopt -> test_parse_defaultgetter self.type = attrs['type'] else: self.type = typ try: # attribute existence is tested in Config._processopt self.default = attrs['default'] except KeyError: pass self._set_opt_strings(names) if not self.dest: if self._long_opts: self.dest = self._long_opts[0][2:].replace('-', '_') else: try: self.dest = self._short_opts[0][1:] except IndexError: raise ArgumentError( 'need a long or short option', self) def names(self): return self._short_opts + self._long_opts def attrs(self): # update any attributes set by processopt attrs = 'default dest help'.split() if self.dest: attrs.append(self.dest) for attr in attrs: try: self._attrs[attr] = getattr(self, attr) except AttributeError: pass if self._attrs.get('help'): a = self._attrs['help'] a = a.replace('%default', '%(default)s') #a = a.replace('%prog', '%(prog)s') self._attrs['help'] = a return self._attrs def _set_opt_strings(self, opts): """directly from optparse might not be necessary as this is passed to argparse later on""" for opt in opts: if len(opt) < 2: raise ArgumentError( "invalid option string %r: " "must be at least two characters long" % opt, self) elif len(opt) == 2: if not (opt[0] == "-" and opt[1] != "-"): raise ArgumentError( "invalid short option string %r: " "must be of the form -x, (x any non-dash char)" % opt, self) self._short_opts.append(opt) else: if not (opt[0:2] == "--" and opt[2] != "-"): raise ArgumentError( "invalid long option string %r: " "must start with --, followed by non-dash" % opt, self) self._long_opts.append(opt) def __repr__(self): retval = 'Argument(' if self._short_opts: retval += '_short_opts: ' + repr(self._short_opts) + ', ' if self._long_opts: retval += '_long_opts: ' + repr(self._long_opts) + ', ' retval += 'dest: ' + repr(self.dest) + ', ' if hasattr(self, 'type'): retval += 'type: ' + repr(self.type) + ', ' if hasattr(self, 
'default'): retval += 'default: ' + repr(self.default) + ', ' if retval[-2:] == ', ': # always long enough to test ("Argument(" ) retval = retval[:-2] retval += ')' return retval class OptionGroup: def __init__(self, name, description="", parser=None): self.name = name self.description = description self.options = [] self.parser = parser def addoption(self, *optnames, **attrs): """ add an option to this group. if a shortened version of a long option is specified it will be suppressed in the help. addoption('--twowords', '--two-words') results in help showing '--two-words' only, but --twowords gets accepted **and** the automatic destination is in args.twowords """ option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=False) def _addoption(self, *optnames, **attrs): option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=True) def _addoption_instance(self, option, shortupper=False): if not shortupper: for opt in option._short_opts: if opt[0] == '-' and opt[1].islower(): raise ValueError("lowercase shortoptions reserved") if self.parser: self.parser.processoption(option) self.options.append(option) class MyOptionParser(py.std.argparse.ArgumentParser): def __init__(self, parser): self._parser = parser py.std.argparse.ArgumentParser.__init__(self, usage=parser._usage, add_help=False, formatter_class=DropShorterLongHelpFormatter) def format_epilog(self, formatter): hints = self._parser.hints if hints: s = "\n".join(["hint: " + x for x in hints]) + "\n" s = "\n" + s + "\n" return s return "" def parse_args(self, args=None, namespace=None): """allow splitting of positional arguments""" args, argv = self.parse_known_args(args, namespace) if argv: for arg in argv: if arg and arg[0] == '-': msg = py.std.argparse._('unrecognized arguments: %s') self.error(msg % ' '.join(argv)) getattr(args, FILE_OR_DIR).extend(argv) return args class DropShorterLongHelpFormatter(py.std.argparse.HelpFormatter): """shorten help for long 
options that differ only in extra hyphens - collapse **long** options that are the same except for extra hyphens - special action attribute map_long_option allows surpressing additional long options - shortcut if there are only two options and one of them is a short one - cache result on action object as this is called at least 2 times """ def _format_action_invocation(self, action): orgstr = py.std.argparse.HelpFormatter._format_action_invocation(self, action) if orgstr and orgstr[0] != '-': # only optional arguments return orgstr res = getattr(action, '_formatted_action_invocation', None) if res: return res options = orgstr.split(', ') if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): # a shortcut for '-h, --help' or '--abc', '-a' action._formatted_action_invocation = orgstr return orgstr return_list = [] option_map = getattr(action, 'map_long_option', {}) if option_map is None: option_map = {} short_long = {} for option in options: if len(option) == 2 or option[2] == ' ': continue if not option.startswith('--'): raise ArgumentError('long optional argument without "--": [%s]' % (option), self) xxoption = option[2:] if xxoption.split()[0] not in option_map: shortened = xxoption.replace('-', '') if shortened not in short_long or \ len(short_long[shortened]) < len(xxoption): short_long[shortened] = xxoption # now short_long has been filled out to the longest with dashes # **and** we keep the right option ordering from add_argument for option in options: # if len(option) == 2 or option[2] == ' ': return_list.append(option) if option[2:] == short_long.get(option.replace('-', '')): return_list.append(option) action._formatted_action_invocation = ', '.join(return_list) return action._formatted_action_invocation class Conftest(object): """ the single place for accessing values and interacting towards conftest modules from py.test objects. 
""" def __init__(self, onimport=None, confcutdir=None): self._path2confmods = {} self._onimport = onimport self._conftestpath2mod = {} self._confcutdir = confcutdir def setinitial(self, args): """ try to find a first anchor path for looking up global values from conftests. This function is usually called _before_ argument parsing. conftest files may add command line options and we thus have no completely safe way of determining which parts of the arguments are actually related to options and which are file system paths. We just try here to get bootstrapped ... """ current = py.path.local() opt = '--confcutdir' for i in range(len(args)): opt1 = str(args[i]) if opt1.startswith(opt): if opt1 == opt: if len(args) > i: p = current.join(args[i+1], abs=True) elif opt1.startswith(opt + "="): p = current.join(opt1[len(opt)+1:], abs=1) self._confcutdir = p break foundanchor = False for arg in args: if hasattr(arg, 'startswith') and arg.startswith("--"): continue anchor = current.join(arg, abs=1) if exists(anchor): # we found some file object self._try_load_conftest(anchor) foundanchor = True if not foundanchor: self._try_load_conftest(current) def _try_load_conftest(self, anchor): self._path2confmods[None] = self.getconftestmodules(anchor) # let's also consider test* subdirs if anchor.check(dir=1): for x in anchor.listdir("test*"): if x.check(dir=1): self.getconftestmodules(x) def getconftestmodules(self, path): try: clist = self._path2confmods[path] except KeyError: if path is None: raise ValueError("missing default conftest.") clist = [] for parent in path.parts(): if self._confcutdir and self._confcutdir.relto(parent): continue conftestpath = parent.join("conftest.py") if conftestpath.check(file=1): clist.append(self.importconftest(conftestpath)) self._path2confmods[path] = clist return clist def rget(self, name, path=None): mod, value = self.rget_with_confmod(name, path) return value def rget_with_confmod(self, name, path=None): modules = self.getconftestmodules(path) 
modules.reverse() for mod in modules: try: return mod, getattr(mod, name) except AttributeError: continue raise KeyError(name) def importconftest(self, conftestpath): assert conftestpath.check(), conftestpath try: return self._conftestpath2mod[conftestpath] except KeyError: pkgpath = conftestpath.pypkgpath() if pkgpath is None: _ensure_removed_sysmodule(conftestpath.purebasename) self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport() dirpath = conftestpath.dirpath() if dirpath in self._path2confmods: for path, mods in self._path2confmods.items(): if path and path.relto(dirpath) or path == dirpath: assert mod not in mods mods.append(mod) self._postimport(mod) return mod def _postimport(self, mod): if self._onimport: self._onimport(mod) return mod def _ensure_removed_sysmodule(modname): try: del sys.modules[modname] except KeyError: pass class CmdOptions(object): """ holds cmdline options as attributes.""" def __init__(self, **kwargs): self.__dict__.update(kwargs) def __repr__(self): return "" %(self.__dict__,) FILE_OR_DIR = 'file_or_dir' class Config(object): """ access to configuration values, pluginmanager and plugin hooks. """ def __init__(self, pluginmanager): #: access to command line option as attributes. 
#: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead self.option = CmdOptions() _a = FILE_OR_DIR self._parser = Parser( usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a), processopt=self._processopt, ) #: a pluginmanager instance self.pluginmanager = pluginmanager self.trace = self.pluginmanager.trace.root.get("config") self._conftest = Conftest(onimport=self._onimportconftest) self.hook = self.pluginmanager.hook self._inicache = {} self._opt2dest = {} self._cleanup = [] self.pluginmanager.register(self, "pytestconfig") self.pluginmanager.set_register_callback(self._register_plugin) self._configured = False def _register_plugin(self, plugin, name): call_plugin = self.pluginmanager.call_plugin call_plugin(plugin, "pytest_addhooks", {'pluginmanager': self.pluginmanager}) self.hook.pytest_plugin_registered(plugin=plugin, manager=self.pluginmanager) dic = call_plugin(plugin, "pytest_namespace", {}) or {} if dic: import pytest setns(pytest, dic) call_plugin(plugin, "pytest_addoption", {'parser': self._parser}) if self._configured: call_plugin(plugin, "pytest_configure", {'config': self}) def do_configure(self): assert not self._configured self._configured = True self.hook.pytest_configure(config=self) def do_unconfigure(self): assert self._configured self._configured = False self.hook.pytest_unconfigure(config=self) self.pluginmanager.ensure_shutdown() def pytest_cmdline_parse(self, pluginmanager, args): assert self == pluginmanager.config, (self, pluginmanager.config) self.parse(args) return self def pytest_unconfigure(config): while config._cleanup: fin = config._cleanup.pop() fin() def notify_exception(self, excinfo, option=None): if option and option.fulltrace: style = "long" else: style = "native" excrepr = excinfo.getrepr(funcargs=True, showlocals=getattr(option, 'showlocals', False), style=style, ) res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) if not py.builtin.any(res): for line in 
str(excrepr).split("\n"): sys.stderr.write("INTERNALERROR> %s\n" %line) sys.stderr.flush() @classmethod def fromdictargs(cls, option_dict, args): """ constructor useable for subprocesses. """ pluginmanager = get_plugin_manager() config = pluginmanager.config config._preparse(args, addopts=False) config.option.__dict__.update(option_dict) for x in config.option.plugins: config.pluginmanager.consider_pluginarg(x) return config def _onimportconftest(self, conftestmodule): self.trace("loaded conftestmodule %r" %(conftestmodule,)) self.pluginmanager.consider_conftest(conftestmodule) def _processopt(self, opt): for name in opt._short_opts + opt._long_opts: self._opt2dest[name] = opt.dest if hasattr(opt, 'default') and opt.dest: if not hasattr(self.option, opt.dest): setattr(self.option, opt.dest, opt.default) def _getmatchingplugins(self, fspath): allconftests = self._conftest._conftestpath2mod.values() plugins = [x for x in self.pluginmanager.getplugins() if x not in allconftests] plugins += self._conftest.getconftestmodules(fspath) return plugins def pytest_load_initial_conftests(self, parser, args): self._conftest.setinitial(args) pytest_load_initial_conftests.trylast = True def _initini(self, args): self.inicfg = getcfg(args, ["pytest.ini", "tox.ini", "setup.cfg"]) self._parser.addini('addopts', 'extra command line options', 'args') self._parser.addini('minversion', 'minimally required pytest version') def _preparse(self, args, addopts=True): self._initini(args) if addopts: args[:] = self.getini("addopts") + args self._checkversion() self.pluginmanager.consider_preparse(args) self.pluginmanager.consider_setuptools_entrypoints() self.pluginmanager.consider_env() self.hook.pytest_load_initial_conftests(early_config=self, args=args, parser=self._parser) def _checkversion(self): import pytest minver = self.inicfg.get('minversion', None) if minver: ver = minver.split(".") myver = pytest.__version__.split(".") if myver < ver: raise pytest.UsageError( "%s:%d: requires 
pytest-%s, actual pytest-%s'" %( self.inicfg.config.path, self.inicfg.lineof('minversion'), minver, pytest.__version__)) def parse(self, args): # parse given cmdline arguments into this config object. # Note that this can only be called once per testing process. assert not hasattr(self, 'args'), ( "can only parse cmdline args at most once per Config object") self._origargs = args self._preparse(args) # XXX deprecated hook: self.hook.pytest_cmdline_preparse(config=self, args=args) self._parser.hints.extend(self.pluginmanager._hints) args = self._parser.parse_setoption(args, self.option) if not args: args.append(py.std.os.getcwd()) self.args = args def addinivalue_line(self, name, line): """ add a line to an ini-file option. The option must have been declared but might not yet be set in which case the line becomes the the first line in its value. """ x = self.getini(name) assert isinstance(x, list) x.append(line) # modifies the cached list inline def getini(self, name): """ return configuration value from an :ref:`ini file `. If the specified name hasn't been registered through a prior :py:func:`parser.addini ` call (usually from a plugin), a ValueError is raised. 
""" try: return self._inicache[name] except KeyError: self._inicache[name] = val = self._getini(name) return val def _getini(self, name): try: description, type, default = self._parser._inidict[name] except KeyError: raise ValueError("unknown configuration value: %r" %(name,)) try: value = self.inicfg[name] except KeyError: if default is not None: return default if type is None: return '' return [] if type == "pathlist": dp = py.path.local(self.inicfg.config.path).dirpath() l = [] for relpath in py.std.shlex.split(value): l.append(dp.join(relpath, abs=True)) return l elif type == "args": return py.std.shlex.split(value) elif type == "linelist": return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] else: assert type is None return value def _getconftest_pathlist(self, name, path=None): try: mod, relroots = self._conftest.rget_with_confmod(name, path) except KeyError: return None modpath = py.path.local(mod.__file__).dirpath() l = [] for relroot in relroots: if not isinstance(relroot, py.path.local): relroot = relroot.replace("/", py.path.local.sep) relroot = modpath.join(relroot, abs=True) l.append(relroot) return l def _getconftest(self, name, path=None, check=False): if check: self._checkconftest(name) return self._conftest.rget(name, path) def getoption(self, name): """ return command line option value. :arg name: name of the option. You may also specify the literal ``--OPT`` option instead of the "dest" option name. """ name = self._opt2dest.get(name, name) try: return getattr(self.option, name) except AttributeError: raise ValueError("no option named %r" % (name,)) def getvalue(self, name, path=None): """ return command line option value. :arg name: name of the command line option (deprecated) if we can't find the option also lookup the name in a matching conftest file. 
""" try: return getattr(self.option, name) except AttributeError: return self._getconftest(name, path, check=False) def getvalueorskip(self, name, path=None): """ (deprecated) return getvalue(name) or call py.test.skip if no value exists. """ __tracebackhide__ = True try: val = self.getvalue(name, path) if val is None: raise KeyError(name) return val except KeyError: py.test.skip("no %r value found" %(name,)) def exists(path, ignore=EnvironmentError): try: return path.check() except ignore: return False def getcfg(args, inibasenames): args = [x for x in args if not str(x).startswith("-")] if not args: args = [py.path.local()] for arg in args: arg = py.path.local(arg) for base in arg.parts(reverse=True): for inibasename in inibasenames: p = base.join(inibasename) if exists(p): iniconfig = py.iniconfig.IniConfig(p) if 'pytest' in iniconfig.sections: return iniconfig['pytest'] return {} def setns(obj, dic): import pytest for name, value in dic.items(): if isinstance(value, dict): mod = getattr(obj, name, None) if mod is None: modname = "pytest.%s" % name mod = py.std.types.ModuleType(modname) sys.modules[modname] = mod mod.__all__ = [] setattr(obj, name, mod) obj.__all__.append(name) setns(mod, value) else: setattr(obj, name, value) obj.__all__.append(name) #if obj != pytest: # pytest.__all__.append(name) setattr(pytest, name, value) pytest-2.5.1/_pytest/standalonetemplate.py0000664000175000017500000000375112254002202020357 0ustar hpkhpk00000000000000#! 
/usr/bin/env python sources = """ @SOURCES@""" import sys import base64 import zlib class DictImporter(object): def __init__(self, sources): self.sources = sources def find_module(self, fullname, path=None): if fullname == "argparse" and sys.version_info >= (2,7): # we were generated with = (3, 0): exec("def do_exec(co, loc): exec(co, loc)\n") import pickle sources = sources.encode("ascii") # ensure bytes sources = pickle.loads(zlib.decompress(base64.decodebytes(sources))) else: import cPickle as pickle exec("def do_exec(co, loc): exec co in loc\n") sources = pickle.loads(zlib.decompress(base64.decodestring(sources))) importer = DictImporter(sources) sys.meta_path.insert(0, importer) entry = "@ENTRY@" do_exec(entry, locals()) # noqa pytest-2.5.1/_pytest/core.py0000664000175000017500000003031312254002202015415 0ustar hpkhpk00000000000000""" pytest PluginManager, basic initialization and tracing. """ import sys import inspect import py assert py.__version__.split(".")[:2] >= ['1', '4'], ("installation problem: " "%s is too old, remove or upgrade 'py'" % (py.__version__)) class TagTracer: def __init__(self): self._tag2proc = {} self.writer = None self.indent = 0 def get(self, name): return TagTracerSub(self, (name,)) def format_message(self, tags, args): if isinstance(args[-1], dict): extra = args[-1] args = args[:-1] else: extra = {} content = " ".join(map(str, args)) indent = " " * self.indent lines = [ "%s%s [%s]\n" %(indent, content, ":".join(tags)) ] for name, value in extra.items(): lines.append("%s %s: %s\n" % (indent, name, value)) return lines def processmessage(self, tags, args): if self.writer is not None and args: lines = self.format_message(tags, args) self.writer(''.join(lines)) try: self._tag2proc[tags](tags, args) except KeyError: pass def setwriter(self, writer): self.writer = writer def setprocessor(self, tags, processor): if isinstance(tags, str): tags = tuple(tags.split(":")) else: assert isinstance(tags, tuple) self._tag2proc[tags] = processor 
class TagTracerSub: def __init__(self, root, tags): self.root = root self.tags = tags def __call__(self, *args): self.root.processmessage(self.tags, args) def setmyprocessor(self, processor): self.root.setprocessor(self.tags, processor) def get(self, name): return self.__class__(self.root, self.tags + (name,)) class PluginManager(object): def __init__(self, hookspecs=None): self._name2plugin = {} self._listattrcache = {} self._plugins = [] self._hints = [] self.trace = TagTracer().get("pluginmanage") self._plugin_distinfo = [] self._shutdown = [] self.hook = HookRelay(hookspecs or [], pm=self) def do_configure(self, config): # backward compatibility config.do_configure() def set_register_callback(self, callback): assert not hasattr(self, "_registercallback") self._registercallback = callback def register(self, plugin, name=None, prepend=False): if self._name2plugin.get(name, None) == -1: return name = name or getattr(plugin, '__name__', str(id(plugin))) if self.isregistered(plugin, name): raise ValueError("Plugin already registered: %s=%s\n%s" %( name, plugin, self._name2plugin)) #self.trace("registering", name, plugin) self._name2plugin[name] = plugin reg = getattr(self, "_registercallback", None) if reg is not None: reg(plugin, name) if not prepend: self._plugins.append(plugin) else: self._plugins.insert(0, plugin) return True def unregister(self, plugin=None, name=None): if plugin is None: plugin = self.getplugin(name=name) self._plugins.remove(plugin) for name, value in list(self._name2plugin.items()): if value == plugin: del self._name2plugin[name] def add_shutdown(self, func): self._shutdown.append(func) def ensure_shutdown(self): while self._shutdown: func = self._shutdown.pop() func() self._plugins = [] self._name2plugin.clear() self._listattrcache.clear() def isregistered(self, plugin, name=None): if self.getplugin(name) is not None: return True for val in self._name2plugin.values(): if plugin == val: return True def addhooks(self, spec, prefix="pytest_"): 
self.hook._addhooks(spec, prefix=prefix) def getplugins(self): return list(self._plugins) def skipifmissing(self, name): if not self.hasplugin(name): py.test.skip("plugin %r is missing" % name) def hasplugin(self, name): return bool(self.getplugin(name)) def getplugin(self, name): if name is None: return None try: return self._name2plugin[name] except KeyError: return self._name2plugin.get("_pytest." + name, None) # API for bootstrapping # def _envlist(self, varname): val = py.std.os.environ.get(varname, None) if val is not None: return val.split(',') return () def consider_env(self): for spec in self._envlist("PYTEST_PLUGINS"): self.import_plugin(spec) def consider_setuptools_entrypoints(self): try: from pkg_resources import iter_entry_points, DistributionNotFound except ImportError: return # XXX issue a warning for ep in iter_entry_points('pytest11'): name = ep.name if name.startswith("pytest_"): name = name[7:] if ep.name in self._name2plugin or name in self._name2plugin: continue try: plugin = ep.load() except DistributionNotFound: continue self._plugin_distinfo.append((ep.dist, plugin)) self.register(plugin, name=name) def consider_preparse(self, args): for opt1,opt2 in zip(args, args[1:]): if opt1 == "-p": self.consider_pluginarg(opt2) def consider_pluginarg(self, arg): if arg.startswith("no:"): name = arg[3:] if self.getplugin(name) is not None: self.unregister(None, name=name) self._name2plugin[name] = -1 else: if self.getplugin(arg) is None: self.import_plugin(arg) def consider_conftest(self, conftestmodule): if self.register(conftestmodule, name=conftestmodule.__file__): self.consider_module(conftestmodule) def consider_module(self, mod): attr = getattr(mod, "pytest_plugins", ()) if attr: if not isinstance(attr, (list, tuple)): attr = (attr,) for spec in attr: self.import_plugin(spec) def import_plugin(self, modname): assert isinstance(modname, str) if self.getplugin(modname) is not None: return try: mod = importplugin(modname) except KeyboardInterrupt: 
raise except ImportError: if modname.startswith("pytest_"): return self.import_plugin(modname[7:]) raise except: e = py.std.sys.exc_info()[1] if not hasattr(py.test, 'skip'): raise elif not isinstance(e, py.test.skip.Exception): raise self._hints.append("skipped plugin %r: %s" %((modname, e.msg))) else: self.register(mod, modname) self.consider_module(mod) def listattr(self, attrname, plugins=None): if plugins is None: plugins = self._plugins key = (attrname,) + tuple(plugins) try: return list(self._listattrcache[key]) except KeyError: pass l = [] last = [] for plugin in plugins: try: meth = getattr(plugin, attrname) if hasattr(meth, 'tryfirst'): last.append(meth) elif hasattr(meth, 'trylast'): l.insert(0, meth) else: l.append(meth) except AttributeError: continue l.extend(last) self._listattrcache[key] = list(l) return l def call_plugin(self, plugin, methname, kwargs): return MultiCall(methods=self.listattr(methname, plugins=[plugin]), kwargs=kwargs, firstresult=True).execute() def importplugin(importspec): name = importspec try: mod = "_pytest." + name __import__(mod) return sys.modules[mod] except ImportError: __import__(importspec) return sys.modules[importspec] class MultiCall: """ execute a call into multiple python functions/methods. 
""" def __init__(self, methods, kwargs, firstresult=False): self.methods = list(methods) self.kwargs = kwargs self.results = [] self.firstresult = firstresult def __repr__(self): status = "%d results, %d meths" % (len(self.results), len(self.methods)) return "" %(status, self.kwargs) def execute(self): while self.methods: method = self.methods.pop() kwargs = self.getkwargs(method) res = method(**kwargs) if res is not None: self.results.append(res) if self.firstresult: return res if not self.firstresult: return self.results def getkwargs(self, method): kwargs = {} for argname in varnames(method): try: kwargs[argname] = self.kwargs[argname] except KeyError: if argname == "__multicall__": kwargs[argname] = self return kwargs def varnames(func): """ return argument name tuple for a function, method, class or callable. In case of a class, its "__init__" method is considered. For methods the "self" parameter is not included unless you are passing an unbound method with Python3 (which has no supports for unbound methods) """ cache = getattr(func, "__dict__", {}) try: return cache["_varnames"] except KeyError: pass if inspect.isclass(func): try: func = func.__init__ except AttributeError: return () ismethod = True else: if not inspect.isfunction(func) and not inspect.ismethod(func): func = getattr(func, '__call__', func) ismethod = inspect.ismethod(func) rawcode = py.code.getrawcode(func) try: x = rawcode.co_varnames[ismethod:rawcode.co_argcount] except AttributeError: x = () try: cache["_varnames"] = x except TypeError: pass return x class HookRelay: def __init__(self, hookspecs, pm, prefix="pytest_"): if not isinstance(hookspecs, list): hookspecs = [hookspecs] self._hookspecs = [] self._pm = pm self.trace = pm.trace.root.get("hook") for hookspec in hookspecs: self._addhooks(hookspec, prefix) def _addhooks(self, hookspecs, prefix): self._hookspecs.append(hookspecs) added = False for name, method in vars(hookspecs).items(): if name.startswith(prefix): firstresult = 
getattr(method, 'firstresult', False) hc = HookCaller(self, name, firstresult=firstresult) setattr(self, name, hc) added = True #print ("setting new hook", name) if not added: raise ValueError("did not find new %r hooks in %r" %( prefix, hookspecs,)) class HookCaller: def __init__(self, hookrelay, name, firstresult): self.hookrelay = hookrelay self.name = name self.firstresult = firstresult self.trace = self.hookrelay.trace def __repr__(self): return "" %(self.name,) def __call__(self, **kwargs): methods = self.hookrelay._pm.listattr(self.name) return self._docall(methods, kwargs) def pcall(self, plugins, **kwargs): methods = self.hookrelay._pm.listattr(self.name, plugins=plugins) return self._docall(methods, kwargs) def _docall(self, methods, kwargs): self.trace(self.name, kwargs) self.trace.root.indent += 1 mc = MultiCall(methods, kwargs, firstresult=self.firstresult) try: res = mc.execute() if res: self.trace("finish", self.name, "-->", res) finally: self.trace.root.indent -= 1 return res pytest-2.5.1/_pytest/__init__.py0000664000175000017500000000003012254002202016215 0ustar hpkhpk00000000000000# __version__ = '2.5.1' pytest-2.5.1/_pytest/main.py0000664000175000017500000005734512254002202015427 0ustar hpkhpk00000000000000""" core implementation of testing process: init, session, runtest loop. 
""" import py import pytest, _pytest import os, sys, imp try: from collections import MutableMapping as MappingMixin except ImportError: from UserDict import DictMixin as MappingMixin from _pytest.runner import collect_one_node, Skipped tracebackcutdir = py.path.local(_pytest.__file__).dirpath() # exitcodes for the command line EXIT_OK = 0 EXIT_TESTSFAILED = 1 EXIT_INTERRUPTED = 2 EXIT_INTERNALERROR = 3 EXIT_USAGEERROR = 4 name_re = py.std.re.compile("^[a-zA-Z_]\w*$") def pytest_addoption(parser): parser.addini("norecursedirs", "directory patterns to avoid for recursion", type="args", default=('.*', 'CVS', '_darcs', '{arch}')) #parser.addini("dirpatterns", # "patterns specifying possible locations of test files", # type="linelist", default=["**/test_*.txt", # "**/test_*.py", "**/*_test.py"] #) group = parser.getgroup("general", "running and selection options") group._addoption('-x', '--exitfirst', action="store_true", default=False, dest="exitfirst", help="exit instantly on first error or failed test."), group._addoption('--maxfail', metavar="num", action="store", type=int, dest="maxfail", default=0, help="exit after first num failures or errors.") group._addoption('--strict', action="store_true", help="run pytest in strict mode, warnings become errors.") group = parser.getgroup("collect", "collection") group.addoption('--collectonly', '--collect-only', action="store_true", help="only collect tests, don't execute them."), group.addoption('--pyargs', action="store_true", help="try to interpret all arguments as python packages.") group.addoption("--ignore", action="append", metavar="path", help="ignore path during collection (multi-allowed).") # when changing this to --conf-cut-dir, config.py Conftest.setinitial # needs upgrading as well group.addoption('--confcutdir', dest="confcutdir", default=None, metavar="dir", help="only load conftest.py's relative to specified dir.") group = parser.getgroup("debugconfig", "test session debugging and configuration") 
group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", help="base temporary directory for this test run.") def pytest_namespace(): collect = dict(Item=Item, Collector=Collector, File=File, Session=Session) return dict(collect=collect) def pytest_configure(config): py.test.config = config # compatibiltiy if config.option.exitfirst: config.option.maxfail = 1 def wrap_session(config, doit): """Skeleton command line program""" session = Session(config) session.exitstatus = EXIT_OK initstate = 0 try: try: config.do_configure() initstate = 1 config.hook.pytest_sessionstart(session=session) initstate = 2 doit(config, session) except pytest.UsageError: msg = sys.exc_info()[1].args[0] sys.stderr.write("ERROR: %s\n" %(msg,)) session.exitstatus = EXIT_USAGEERROR except KeyboardInterrupt: excinfo = py.code.ExceptionInfo() config.hook.pytest_keyboard_interrupt(excinfo=excinfo) session.exitstatus = EXIT_INTERRUPTED except: excinfo = py.code.ExceptionInfo() config.notify_exception(excinfo, config.option) session.exitstatus = EXIT_INTERNALERROR if excinfo.errisinstance(SystemExit): sys.stderr.write("mainloop: caught Spurious SystemExit!\n") else: if session._testsfailed: session.exitstatus = EXIT_TESTSFAILED finally: session.startdir.chdir() if initstate >= 2: config.hook.pytest_sessionfinish( session=session, exitstatus=session.exitstatus) if initstate >= 1: config.do_unconfigure() config.pluginmanager.ensure_shutdown() return session.exitstatus def pytest_cmdline_main(config): return wrap_session(config, _main) def _main(config, session): """ default command line protocol for initialization, session, running tests and reporting. 
""" config.hook.pytest_collection(session=session) config.hook.pytest_runtestloop(session=session) def pytest_collection(session): return session.perform_collect() def pytest_runtestloop(session): if session.config.option.collectonly: return True def getnextitem(i): # this is a function to avoid python2 # keeping sys.exc_info set when calling into a test # python2 keeps sys.exc_info till the frame is left try: return session.items[i+1] except IndexError: return None for i, item in enumerate(session.items): nextitem = getnextitem(i) item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) if session.shouldstop: raise session.Interrupted(session.shouldstop) return True def pytest_ignore_collect(path, config): p = path.dirpath() ignore_paths = config._getconftest_pathlist("collect_ignore", path=p) ignore_paths = ignore_paths or [] excludeopt = config.getvalue("ignore") if excludeopt: ignore_paths.extend([py.path.local(x) for x in excludeopt]) return path in ignore_paths class HookProxy: def __init__(self, fspath, config): self.fspath = fspath self.config = config def __getattr__(self, name): hookmethod = getattr(self.config.hook, name) def call_matching_hooks(**kwargs): plugins = self.config._getmatchingplugins(self.fspath) return hookmethod.pcall(plugins, **kwargs) return call_matching_hooks def compatproperty(name): def fget(self): # deprecated - use pytest.name return getattr(pytest, name) return property(fget) class NodeKeywords(MappingMixin): def __init__(self, node): self.node = node self.parent = node.parent self._markers = {node.name: True} def __getitem__(self, key): try: return self._markers[key] except KeyError: if self.parent is None: raise return self.parent.keywords[key] def __setitem__(self, key, value): self._markers[key] = value def __delitem__(self, key): raise ValueError("cannot delete key in keywords dict") def __iter__(self): seen = set(self._markers) if self.parent is not None: seen.update(self.parent.keywords) return iter(seen) def 
__len__(self): return len(self.__iter__()) def keys(self): return list(self) def __repr__(self): return "" % (self.node, ) class Node(object): """ base class for Collector and Item the test collection tree. Collector subclasses have children, Items are terminal nodes.""" def __init__(self, name, parent=None, config=None, session=None): #: a unique name within the scope of the parent node self.name = name #: the parent collector node. self.parent = parent #: the pytest config object self.config = config or parent.config #: the session this node is part of self.session = session or parent.session #: filesystem path where this node was collected from (can be None) self.fspath = getattr(parent, 'fspath', None) #: keywords/markers collected from all scopes self.keywords = NodeKeywords(self) #: allow adding of extra keywords to use for matching self.extra_keyword_matches = set() # used for storing artificial fixturedefs for direct parametrization self._name2pseudofixturedef = {} #self.extrainit() @property def ihook(self): """ fspath sensitive hook proxy used to call pytest hooks""" return self.session.gethookproxy(self.fspath) #def extrainit(self): # """"extra initialization after Node is initialized. Implemented # by some subclasses. """ Module = compatproperty("Module") Class = compatproperty("Class") Instance = compatproperty("Instance") Function = compatproperty("Function") File = compatproperty("File") Item = compatproperty("Item") def _getcustomclass(self, name): cls = getattr(self, name) if cls != getattr(pytest, name): py.log._apiwarn("2.0", "use of node.%s is deprecated, " "use pytest_pycollect_makeitem(...) to create custom " "collection nodes" % name) return cls def __repr__(self): return "<%s %r>" %(self.__class__.__name__, getattr(self, 'name', None)) # methods for ordering nodes @property def nodeid(self): """ a ::-separated string denoting its collection tree address. 
""" try: return self._nodeid except AttributeError: self._nodeid = x = self._makeid() return x def _makeid(self): return self.parent.nodeid + "::" + self.name def __hash__(self): return hash(self.nodeid) def setup(self): pass def teardown(self): pass def _memoizedcall(self, attrname, function): exattrname = "_ex_" + attrname failure = getattr(self, exattrname, None) if failure is not None: py.builtin._reraise(failure[0], failure[1], failure[2]) if hasattr(self, attrname): return getattr(self, attrname) try: res = function() except py.builtin._sysex: raise except: failure = py.std.sys.exc_info() setattr(self, exattrname, failure) raise setattr(self, attrname, res) return res def listchain(self): """ return list of all parent collectors up to self, starting from root of collection tree. """ chain = [] item = self while item is not None: chain.append(item) item = item.parent chain.reverse() return chain def add_marker(self, marker): """ dynamically add a marker object to the node. ``marker`` can be a string or pytest.mark.* instance. """ from _pytest.mark import MarkDecorator if isinstance(marker, py.builtin._basestring): marker = MarkDecorator(marker) elif not isinstance(marker, MarkDecorator): raise ValueError("is not a string or pytest.mark.* Marker") self.keywords[marker.name] = marker def get_marker(self, name): """ get a marker object from this node or None if the node doesn't have a marker with that name. 
""" val = self.keywords.get(name, None) if val is not None: from _pytest.mark import MarkInfo, MarkDecorator if isinstance(val, (MarkDecorator, MarkInfo)): return val def listextrakeywords(self): """ Return a set of all extra keywords in self and any parents.""" extra_keywords = set() item = self for item in self.listchain(): extra_keywords.update(item.extra_keyword_matches) return extra_keywords def listnames(self): return [x.name for x in self.listchain()] def getplugins(self): return self.config._getmatchingplugins(self.fspath) def addfinalizer(self, fin): """ register a function to be called when this node is finalized. This method can only be called when this node is active in a setup chain, for example during self.setup(). """ self.session._setupstate.addfinalizer(fin, self) def getparent(self, cls): """ get the next parent node (including ourself) which is an instance of the given class""" current = self while current and not isinstance(current, cls): current = current.parent return current def _prunetraceback(self, excinfo): pass def _repr_failure_py(self, excinfo, style=None): fm = self.session._fixturemanager if excinfo.errisinstance(fm.FixtureLookupError): return excinfo.value.formatrepr() if self.config.option.fulltrace: style="long" else: self._prunetraceback(excinfo) # XXX should excinfo.getrepr record all data and toterminal() # process it? if style is None: if self.config.option.tbstyle == "short": style = "short" else: style = "long" return excinfo.getrepr(funcargs=True, showlocals=self.config.option.showlocals, style=style) repr_failure = _repr_failure_py class Collector(Node): """ Collector instances create children through collect() and thus iteratively build a tree. """ # the set of exceptions to interpret as "Skip the whole module" during # collection skip_exceptions = (Skipped,) class CollectError(Exception): """ an error during collection, contains a custom message. 
""" def collect(self): """ returns a list of children (items and collectors) for this collection node. """ raise NotImplementedError("abstract") def repr_failure(self, excinfo): """ represent a collection failure. """ if excinfo.errisinstance(self.CollectError): exc = excinfo.value return str(exc.args[0]) return self._repr_failure_py(excinfo, style="short") def _memocollect(self): """ internal helper method to cache results of calling collect(). """ return self._memoizedcall('_collected', lambda: list(self.collect())) def _prunetraceback(self, excinfo): if hasattr(self, 'fspath'): traceback = excinfo.traceback ntraceback = traceback.cut(path=self.fspath) if ntraceback == traceback: ntraceback = ntraceback.cut(excludepath=tracebackcutdir) excinfo.traceback = ntraceback.filter() class FSCollector(Collector): def __init__(self, fspath, parent=None, config=None, session=None): fspath = py.path.local(fspath) # xxx only for test_resultlog.py? name = fspath.basename if parent is not None: rel = fspath.relto(parent.fspath) if rel: name = rel name = name.replace(os.sep, "/") super(FSCollector, self).__init__(name, parent, config, session) self.fspath = fspath def _makeid(self): if self == self.session: return "." relpath = self.session.fspath.bestrelpath(self.fspath) if os.sep != "/": relpath = relpath.replace(os.sep, "/") return relpath class File(FSCollector): """ base class for collecting tests from a file. """ class Item(Node): """ a basic test invocation item. Note that for a single function there might be multiple test invocation items. 
""" nextitem = None def reportinfo(self): return self.fspath, None, "" @property def location(self): try: return self._location except AttributeError: location = self.reportinfo() # bestrelpath is a quite slow function cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) try: fspath = cache[location[0]] except KeyError: fspath = self.session.fspath.bestrelpath(location[0]) cache[location[0]] = fspath location = (fspath, location[1], str(location[2])) self._location = location return location class NoMatch(Exception): """ raised if matching cannot locate a matching names. """ class Session(FSCollector): class Interrupted(KeyboardInterrupt): """ signals an interrupted test run. """ __module__ = 'builtins' # for py3 def __init__(self, config): FSCollector.__init__(self, py.path.local(), parent=None, config=config, session=self) self.config.pluginmanager.register(self, name="session", prepend=True) self._testsfailed = 0 self.shouldstop = False self.trace = config.trace.root.get("collection") self._norecursepatterns = config.getini("norecursedirs") self.startdir = py.path.local() def pytest_collectstart(self): if self.shouldstop: raise self.Interrupted(self.shouldstop) def pytest_runtest_logreport(self, report): if report.failed and not hasattr(report, 'wasxfail'): self._testsfailed += 1 maxfail = self.config.getvalue("maxfail") if maxfail and self._testsfailed >= maxfail: self.shouldstop = "stopping after %d failures" % ( self._testsfailed) pytest_collectreport = pytest_runtest_logreport def isinitpath(self, path): return path in self._initialpaths def gethookproxy(self, fspath): return HookProxy(fspath, self.config) def perform_collect(self, args=None, genitems=True): hook = self.config.hook try: items = self._perform_collect(args, genitems) hook.pytest_collection_modifyitems(session=self, config=self.config, items=items) finally: hook.pytest_collection_finish(session=self) return items def _perform_collect(self, args, genitems): if args is None: args = 
self.config.args self.trace("perform_collect", self, args) self.trace.root.indent += 1 self._notfound = [] self._initialpaths = set() self._initialparts = [] self.items = items = [] for arg in args: parts = self._parsearg(arg) self._initialparts.append(parts) self._initialpaths.add(parts[0]) rep = collect_one_node(self) self.ihook.pytest_collectreport(report=rep) self.trace.root.indent -= 1 if self._notfound: for arg, exc in self._notfound: line = "(no name %r in any of %r)" % (arg, exc.args[0]) raise pytest.UsageError("not found: %s\n%s" %(arg, line)) if not genitems: return rep.result else: if rep.passed: for node in rep.result: self.items.extend(self.genitems(node)) return items def collect(self): for parts in self._initialparts: arg = "::".join(map(str, parts)) self.trace("processing argument", arg) self.trace.root.indent += 1 try: for x in self._collect(arg): yield x except NoMatch: # we are inside a make_report hook so # we cannot directly pass through the exception self._notfound.append((arg, sys.exc_info()[1])) self.trace.root.indent -= 1 break self.trace.root.indent -= 1 def _collect(self, arg): names = self._parsearg(arg) path = names.pop(0) if path.check(dir=1): assert not names, "invalid arg %r" %(arg,) for path in path.visit(fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True): for x in self._collectfile(path): yield x else: assert path.check(file=1) for x in self.matchnodes(self._collectfile(path), names): yield x def _collectfile(self, path): ihook = self.gethookproxy(path) if not self.isinitpath(path): if ihook.pytest_ignore_collect(path=path, config=self.config): return () return ihook.pytest_collect_file(path=path, parent=self) def _recurse(self, path): ihook = self.gethookproxy(path.dirpath()) if ihook.pytest_ignore_collect(path=path, config=self.config): return for pat in self._norecursepatterns: if path.check(fnmatch=pat): return False ihook = self.gethookproxy(path) ihook.pytest_collect_directory(path=path, parent=self) return 
True def _tryconvertpyarg(self, x): mod = None path = [os.path.abspath('.')] + sys.path for name in x.split('.'): # ignore anything that's not a proper name here # else something like --pyargs will mess up '.' # since imp.find_module will actually sometimes work for it # but it's supposed to be considered a filesystem path # not a package if name_re.match(name) is None: return x try: fd, mod, type_ = imp.find_module(name, path) except ImportError: return x else: if fd is not None: fd.close() if type_[2] != imp.PKG_DIRECTORY: path = [os.path.dirname(mod)] else: path = [mod] return mod def _parsearg(self, arg): """ return (fspath, names) tuple after checking the file exists. """ arg = str(arg) if self.config.option.pyargs: arg = self._tryconvertpyarg(arg) parts = str(arg).split("::") relpath = parts[0].replace("/", os.sep) path = self.fspath.join(relpath, abs=True) if not path.check(): if self.config.option.pyargs: msg = "file or package not found: " else: msg = "file not found: " raise pytest.UsageError(msg + arg) parts[0] = path return parts def matchnodes(self, matching, names): self.trace("matchnodes", matching, names) self.trace.root.indent += 1 nodes = self._matchnodes(matching, names) num = len(nodes) self.trace("matchnodes finished -> ", num, "nodes") self.trace.root.indent -= 1 if num == 0: raise NoMatch(matching, names[:1]) return nodes def _matchnodes(self, matching, names): if not matching or not names: return matching name = names[0] assert name nextnames = names[1:] resultnodes = [] for node in matching: if isinstance(node, pytest.Item): if not names: resultnodes.append(node) continue assert isinstance(node, pytest.Collector) rep = collect_one_node(node) if rep.passed: has_matched = False for x in rep.result: if x.name == name: resultnodes.extend(self.matchnodes([x], nextnames)) has_matched = True # XXX accept IDs that don't have "()" for class instances if not has_matched and len(rep.result) == 1 and x.name == "()": nextnames.insert(0, name) 
resultnodes.extend(self.matchnodes([x], nextnames)) node.ihook.pytest_collectreport(report=rep) return resultnodes def genitems(self, node): self.trace("genitems", node) if isinstance(node, pytest.Item): node.ihook.pytest_itemcollected(item=node) yield node else: assert isinstance(node, pytest.Collector) rep = collect_one_node(node) if rep.passed: for subnode in rep.result: for x in self.genitems(subnode): yield x node.ihook.pytest_collectreport(report=rep) pytest-2.5.1/_pytest/helpconfig.py0000664000175000017500000001526312254002202016612 0ustar hpkhpk00000000000000""" version info, help messages, tracing configuration. """ import py import pytest import os, inspect, sys from _pytest.core import varnames def pytest_addoption(parser): group = parser.getgroup('debugconfig') group.addoption('--version', action="store_true", help="display pytest lib version and import information.") group._addoption("-h", "--help", action="store_true", dest="help", help="show help message and configuration info") group._addoption('-p', action="append", dest="plugins", default = [], metavar="name", help="early-load given plugin (multi-allowed).") group.addoption('--traceconfig', '--trace-config', action="store_true", default=False, help="trace considerations of conftest.py files."), group.addoption('--debug', action="store_true", dest="debug", default=False, help="store internal tracing debug information in 'pytestdebug.log'.") def pytest_cmdline_parse(__multicall__): config = __multicall__.execute() if config.option.debug: path = os.path.abspath("pytestdebug.log") f = open(path, 'w') config._debugfile = f f.write("versions pytest-%s, py-%s, python-%s\ncwd=%s\nargs=%s\n\n" %( pytest.__version__, py.__version__, ".".join(map(str, sys.version_info)), os.getcwd(), config._origargs)) config.trace.root.setwriter(f.write) sys.stderr.write("writing pytestdebug information to %s\n" % path) return config @pytest.mark.trylast def pytest_unconfigure(config): if hasattr(config, '_debugfile'): 
config._debugfile.close() sys.stderr.write("wrote pytestdebug information to %s\n" % config._debugfile.name) config.trace.root.setwriter(None) def pytest_cmdline_main(config): if config.option.version: p = py.path.local(pytest.__file__) sys.stderr.write("This is py.test version %s, imported from %s\n" % (pytest.__version__, p)) plugininfo = getpluginversioninfo(config) if plugininfo: for line in plugininfo: sys.stderr.write(line + "\n") return 0 elif config.option.help: config.do_configure() showhelp(config) config.do_unconfigure() return 0 def showhelp(config): tw = py.io.TerminalWriter() tw.write(config._parser.optparser.format_help()) tw.write(config._parser.optparser.format_epilog(None)) tw.line() tw.line() #tw.sep( "=", "config file settings") tw.line("[pytest] ini-options in the next " "pytest.ini|tox.ini|setup.cfg file:") tw.line() for name in config._parser._ininames: help, type, default = config._parser._inidict[name] if type is None: type = "string" spec = "%s (%s)" % (name, type) line = " %-24s %s" %(spec, help) tw.line(line[:tw.fullwidth]) tw.line() ; tw.line() #tw.sep("=") tw.line("to see available markers type: py.test --markers") tw.line("to see available fixtures type: py.test --fixtures") return tw.line("conftest.py options:") tw.line() conftestitems = sorted(config._parser._conftestdict.items()) for name, help in conftest_options + conftestitems: line = " %-15s %s" %(name, help) tw.line(line[:tw.fullwidth]) tw.line() #tw.sep( "=") conftest_options = [ ('pytest_plugins', 'list of plugin names to load'), ] def getpluginversioninfo(config): lines = [] plugininfo = config.pluginmanager._plugin_distinfo if plugininfo: lines.append("setuptools registered plugins:") for dist, plugin in plugininfo: loc = getattr(plugin, '__file__', repr(plugin)) content = "%s-%s at %s" % (dist.project_name, dist.version, loc) lines.append(" " + content) return lines def pytest_report_header(config): lines = [] if config.option.debug or config.option.traceconfig: 
lines.append("using: pytest-%s pylib-%s" % (pytest.__version__,py.__version__)) verinfo = getpluginversioninfo(config) if verinfo: lines.extend(verinfo) if config.option.traceconfig: lines.append("active plugins:") items = config.pluginmanager._name2plugin.items() for name, plugin in items: if hasattr(plugin, '__file__'): r = plugin.__file__ else: r = repr(plugin) lines.append(" %-20s: %s" %(name, r)) return lines # ===================================================== # validate plugin syntax and hooks # ===================================================== def pytest_plugin_registered(manager, plugin): methods = collectattr(plugin) hooks = {} for hookspec in manager.hook._hookspecs: hooks.update(collectattr(hookspec)) stringio = py.io.TextIO() def Print(*args): if args: stringio.write(" ".join(map(str, args))) stringio.write("\n") fail = False while methods: name, method = methods.popitem() #print "checking", name if isgenerichook(name): continue if name not in hooks: if not getattr(method, 'optionalhook', False): Print("found unknown hook:", name) fail = True else: #print "checking", method method_args = list(varnames(method)) if '__multicall__' in method_args: method_args.remove('__multicall__') hook = hooks[name] hookargs = varnames(hook) for arg in method_args: if arg not in hookargs: Print("argument %r not available" %(arg, )) Print("actual definition: %s" %(formatdef(method))) Print("available hook arguments: %s" % ", ".join(hookargs)) fail = True break #if not fail: # print "matching hook:", formatdef(method) if fail: name = getattr(plugin, '__name__', plugin) raise PluginValidationError("%s:\n%s" % (name, stringio.getvalue())) class PluginValidationError(Exception): """ plugin failed validation. 
""" def isgenerichook(name): return name == "pytest_plugins" or \ name.startswith("pytest_funcarg__") def collectattr(obj): methods = {} for apiname in dir(obj): if apiname.startswith("pytest_"): methods[apiname] = getattr(obj, apiname) return methods def formatdef(func): return "%s%s" % ( func.__name__, inspect.formatargspec(*inspect.getargspec(func)) ) pytest-2.5.1/_pytest/unittest.py0000664000175000017500000001450612254002202016352 0ustar hpkhpk00000000000000""" discovery and running of std-library "unittest" style tests. """ import pytest, py import sys # for transfering markers from _pytest.python import transfer_markers def is_unittest(obj): """Is obj a subclass of unittest.TestCase?""" unittest = sys.modules.get('unittest') if unittest is None: return # nobody can have derived unittest.TestCase try: return issubclass(obj, unittest.TestCase) except KeyboardInterrupt: raise except: return False def pytest_pycollect_makeitem(collector, name, obj): if is_unittest(obj): return UnitTestCase(name, parent=collector) class UnitTestCase(pytest.Class): nofuncargs = True # marker for fixturemanger.getfixtureinfo() # to declare that our children do not support funcargs # def setup(self): cls = self.obj if getattr(cls, '__unittest_skip__', False): return # skipped setup = getattr(cls, 'setUpClass', None) if setup is not None: setup() teardown = getattr(cls, 'tearDownClass', None) if teardown is not None: self.addfinalizer(teardown) super(UnitTestCase, self).setup() def collect(self): self.session._fixturemanager.parsefactories(self, unittest=True) loader = py.std.unittest.TestLoader() module = self.getparent(pytest.Module).obj cls = self.obj foundsomething = False for name in loader.getTestCaseNames(self.obj): x = getattr(self.obj, name) funcobj = getattr(x, 'im_func', x) transfer_markers(funcobj, cls, module) yield TestCaseFunction(name, parent=self) foundsomething = True if not foundsomething: runtest = getattr(self.obj, 'runTest', None) if runtest is not None: ut = 
sys.modules.get("twisted.trial.unittest", None) if ut is None or runtest != ut.TestCase.runTest: yield TestCaseFunction('runTest', parent=self) class TestCaseFunction(pytest.Function): _excinfo = None def setup(self): self._testcase = self.parent.obj(self.name) self._obj = getattr(self._testcase, self.name) if hasattr(self._testcase, 'setup_method'): self._testcase.setup_method(self._obj) if hasattr(self, "_request"): self._request._fillfixtures() def teardown(self): if hasattr(self._testcase, 'teardown_method'): self._testcase.teardown_method(self._obj) def startTest(self, testcase): pass def _addexcinfo(self, rawexcinfo): # unwrap potential exception info (see twisted trial support below) rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo) try: excinfo = py.code.ExceptionInfo(rawexcinfo) except TypeError: try: try: l = py.std.traceback.format_exception(*rawexcinfo) l.insert(0, "NOTE: Incompatible Exception Representation, " "displaying natively:\n\n") pytest.fail("".join(l), pytrace=False) except (pytest.fail.Exception, KeyboardInterrupt): raise except: pytest.fail("ERROR: Unknown Incompatible Exception " "representation:\n%r" %(rawexcinfo,), pytrace=False) except KeyboardInterrupt: raise except pytest.fail.Exception: excinfo = py.code.ExceptionInfo() self.__dict__.setdefault('_excinfo', []).append(excinfo) def addError(self, testcase, rawexcinfo): self._addexcinfo(rawexcinfo) def addFailure(self, testcase, rawexcinfo): self._addexcinfo(rawexcinfo) def addSkip(self, testcase, reason): try: pytest.skip(reason) except pytest.skip.Exception: self._addexcinfo(sys.exc_info()) def addExpectedFailure(self, testcase, rawexcinfo, reason=""): try: pytest.xfail(str(reason)) except pytest.xfail.Exception: self._addexcinfo(sys.exc_info()) def addUnexpectedSuccess(self, testcase, reason=""): self._unexpectedsuccess = reason def addSuccess(self, testcase): pass def stopTest(self, testcase): pass def runtest(self): self._testcase(result=self) def _prunetraceback(self, 
excinfo): pytest.Function._prunetraceback(self, excinfo) traceback = excinfo.traceback.filter( lambda x:not x.frame.f_globals.get('__unittest')) if traceback: excinfo.traceback = traceback @pytest.mark.tryfirst def pytest_runtest_makereport(item, call): if isinstance(item, TestCaseFunction): if item._excinfo: call.excinfo = item._excinfo.pop(0) try: del call.result except AttributeError: pass # twisted trial support def pytest_runtest_protocol(item, __multicall__): if isinstance(item, TestCaseFunction): if 'twisted.trial.unittest' in sys.modules: ut = sys.modules['twisted.python.failure'] Failure__init__ = ut.Failure.__init__.im_func check_testcase_implements_trial_reporter() def excstore(self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None): if exc_value is None: self._rawexcinfo = sys.exc_info() else: if exc_type is None: exc_type = type(exc_value) self._rawexcinfo = (exc_type, exc_value, exc_tb) try: Failure__init__(self, exc_value, exc_type, exc_tb, captureVars=captureVars) except TypeError: Failure__init__(self, exc_value, exc_type, exc_tb) ut.Failure.__init__ = excstore try: return __multicall__.execute() finally: ut.Failure.__init__ = Failure__init__ def check_testcase_implements_trial_reporter(done=[]): if done: return from zope.interface import classImplements from twisted.trial.itrial import IReporter classImplements(TestCaseFunction, IReporter) done.append(1) pytest-2.5.1/_pytest/monkeypatch.py0000664000175000017500000001664612254002202017024 0ustar hpkhpk00000000000000""" monkeypatching and mocking functionality. 
""" import os, sys from py.builtin import _basestring def pytest_funcarg__monkeypatch(request): """The returned ``monkeypatch`` funcarg provides these helper methods to modify objects, dictionaries or os.environ:: monkeypatch.setattr(obj, name, value, raising=True) monkeypatch.delattr(obj, name, raising=True) monkeypatch.setitem(mapping, name, value) monkeypatch.delitem(obj, name, raising=True) monkeypatch.setenv(name, value, prepend=False) monkeypatch.delenv(name, value, raising=True) monkeypatch.syspath_prepend(path) monkeypatch.chdir(path) All modifications will be undone after the requesting test function has finished. The ``raising`` parameter determines if a KeyError or AttributeError will be raised if the set/deletion operation has no target. """ mpatch = monkeypatch() request.addfinalizer(mpatch.undo) return mpatch def derive_importpath(import_path): import pytest if not isinstance(import_path, _basestring) or "." not in import_path: raise TypeError("must be absolute import path string, not %r" % (import_path,)) rest = [] target = import_path while target: try: obj = __import__(target, None, None, "__doc__") except ImportError: if "." not in target: __tracebackhide__ = True pytest.fail("could not import any sub part: %s" % import_path) target, name = target.rsplit(".", 1) rest.append(name) else: assert rest try: while len(rest) > 1: attr = rest.pop() obj = getattr(obj, attr) attr = rest[0] getattr(obj, attr) except AttributeError: __tracebackhide__ = True pytest.fail("object %r has no attribute %r" % (obj, attr)) return attr, obj notset = object() class monkeypatch: """ object keeping a record of setattr/item/env/syspath changes. """ def __init__(self): self._setattr = [] self._setitem = [] self._cwd = None def setattr(self, target, name, value=notset, raising=True): """ set attribute value on target, memorizing the old value. By default raise AttributeError if the attribute did not exist. 
For convenience you can specify a string as ``target`` which will be interpreted as a dotted import path, with the last part being the attribute name. Example: ``monkeypatch.setattr("os.getcwd", lambda x: "/")`` would set the ``getcwd`` function of the ``os`` module. The ``raising`` value determines if the setattr should fail if the attribute is not already present (defaults to True which means it will raise). """ __tracebackhide__ = True import inspect if value is notset: if not isinstance(target, _basestring): raise TypeError("use setattr(target, name, value) or " "setattr(target, value) with target being a dotted " "import string") value = name name, target = derive_importpath(target) oldval = getattr(target, name, notset) if raising and oldval is notset: raise AttributeError("%r has no attribute %r" %(target, name)) # avoid class descriptors like staticmethod/classmethod if inspect.isclass(target): oldval = target.__dict__.get(name, notset) self._setattr.insert(0, (target, name, oldval)) setattr(target, name, value) def delattr(self, target, name=notset, raising=True): """ delete attribute ``name`` from ``target``, by default raise AttributeError it the attribute did not previously exist. If no ``name`` is specified and ``target`` is a string it will be interpreted as a dotted import path with the last part being the attribute name. If raising is set to false, the attribute is allowed to not pre-exist. """ __tracebackhide__ = True if name is notset: if not isinstance(target, _basestring): raise TypeError("use delattr(target, name) or " "delattr(target) with target being a dotted " "import string") name, target = derive_importpath(target) if not hasattr(target, name): if raising: raise AttributeError(name) else: self._setattr.insert(0, (target, name, getattr(target, name, notset))) delattr(target, name) def setitem(self, dic, name, value): """ set dictionary entry ``name`` to value. 
""" self._setitem.insert(0, (dic, name, dic.get(name, notset))) dic[name] = value def delitem(self, dic, name, raising=True): """ delete ``name`` from dict, raise KeyError if it doesn't exist.""" if name not in dic: if raising: raise KeyError(name) else: self._setitem.insert(0, (dic, name, dic.get(name, notset))) del dic[name] def setenv(self, name, value, prepend=None): """ set environment variable ``name`` to ``value``. if ``prepend`` is a character, read the current environment variable value and prepend the ``value`` adjoined with the ``prepend`` character.""" value = str(value) if prepend and name in os.environ: value = value + prepend + os.environ[name] self.setitem(os.environ, name, value) def delenv(self, name, raising=True): """ delete ``name`` from environment, raise KeyError it not exists.""" self.delitem(os.environ, name, raising=raising) def syspath_prepend(self, path): """ prepend ``path`` to ``sys.path`` list of import locations. """ if not hasattr(self, '_savesyspath'): self._savesyspath = sys.path[:] sys.path.insert(0, str(path)) def chdir(self, path): """ change the current working directory to the specified path path can be a string or a py.path.local object """ if self._cwd is None: self._cwd = os.getcwd() if hasattr(path, "chdir"): path.chdir() else: os.chdir(path) def undo(self): """ undo previous changes. This call consumes the undo stack. 
Calling it a second time has no effect unless you do more monkeypatching after the undo call.""" for obj, name, value in self._setattr: if value is not notset: setattr(obj, name, value) else: delattr(obj, name) self._setattr[:] = [] for dictionary, name, value in self._setitem: if value is notset: try: del dictionary[name] except KeyError: pass # was already deleted, so we have the desired state else: dictionary[name] = value self._setitem[:] = [] if hasattr(self, '_savesyspath'): sys.path[:] = self._savesyspath del self._savesyspath if self._cwd is not None: os.chdir(self._cwd) self._cwd = None pytest-2.5.1/_pytest/pytester.py0000664000175000017500000005743612254002202016363 0ustar hpkhpk00000000000000""" (disabled by default) support for testing py.test and py.test plugins. """ import py, pytest import sys, os import codecs import re import time from fnmatch import fnmatch from _pytest.main import Session, EXIT_OK from py.builtin import print_ from _pytest.core import HookRelay def get_public_names(l): """Only return names from iterator l without a leading underscore.""" return [x for x in l if x[0] != "_"] def pytest_addoption(parser): group = parser.getgroup("pylib") group.addoption('--no-tools-on-path', action="store_true", dest="notoolsonpath", default=False, help=("discover tools on PATH instead of going through py.cmdline.") ) def pytest_configure(config): # This might be called multiple times. Only take the first. 
global _pytest_fullpath try: _pytest_fullpath except NameError: _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc")) _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py") def pytest_funcarg___pytest(request): return PytestArg(request) class PytestArg: def __init__(self, request): self.request = request def gethookrecorder(self, hook): hookrecorder = HookRecorder(hook._pm) hookrecorder.start_recording(hook._hookspecs) self.request.addfinalizer(hookrecorder.finish_recording) return hookrecorder class ParsedCall: def __init__(self, name, locals): assert '_name' not in locals self.__dict__.update(locals) self.__dict__.pop('self') self._name = name def __repr__(self): d = self.__dict__.copy() del d['_name'] return "" %(self._name, d) class HookRecorder: def __init__(self, pluginmanager): self._pluginmanager = pluginmanager self.calls = [] self._recorders = {} def start_recording(self, hookspecs): if not isinstance(hookspecs, (list, tuple)): hookspecs = [hookspecs] for hookspec in hookspecs: assert hookspec not in self._recorders class RecordCalls: _recorder = self for name, method in vars(hookspec).items(): if name[0] != "_": setattr(RecordCalls, name, self._makecallparser(method)) recorder = RecordCalls() self._recorders[hookspec] = recorder self._pluginmanager.register(recorder) self.hook = HookRelay(hookspecs, pm=self._pluginmanager, prefix="pytest_") def finish_recording(self): for recorder in self._recorders.values(): if self._pluginmanager.isregistered(recorder): self._pluginmanager.unregister(recorder) self._recorders.clear() def _makecallparser(self, method): name = method.__name__ args, varargs, varkw, default = py.std.inspect.getargspec(method) if not args or args[0] != "self": args.insert(0, 'self') fspec = py.std.inspect.formatargspec(args, varargs, varkw, default) # we use exec because we want to have early type # errors on wrong input arguments, using # *args/**kwargs delays this and gives errors # elsewhere exec (py.code.compile(""" 
def %(name)s%(fspec)s: self._recorder.calls.append( ParsedCall(%(name)r, locals())) """ % locals())) return locals()[name] def getcalls(self, names): if isinstance(names, str): names = names.split() for name in names: for cls in self._recorders: if name in vars(cls): break else: raise ValueError("callname %r not found in %r" %( name, self._recorders.keys())) l = [] for call in self.calls: if call._name in names: l.append(call) return l def contains(self, entries): __tracebackhide__ = True i = 0 entries = list(entries) backlocals = py.std.sys._getframe(1).f_locals while entries: name, check = entries.pop(0) for ind, call in enumerate(self.calls[i:]): if call._name == name: print_("NAMEMATCH", name, call) if eval(check, backlocals, call.__dict__): print_("CHECKERMATCH", repr(check), "->", call) else: print_("NOCHECKERMATCH", repr(check), "-", call) continue i += ind + 1 break print_("NONAMEMATCH", name, "with", call) else: py.test.fail("could not find %r check %r" % (name, check)) def popcall(self, name): __tracebackhide__ = True for i, call in enumerate(self.calls): if call._name == name: del self.calls[i] return call lines = ["could not find call %r, in:" % (name,)] lines.extend([" %s" % str(x) for x in self.calls]) py.test.fail("\n".join(lines)) def getcall(self, name): l = self.getcalls(name) assert len(l) == 1, (name, l) return l[0] def pytest_funcarg__linecomp(request): return LineComp() def pytest_funcarg__LineMatcher(request): return LineMatcher def pytest_funcarg__testdir(request): tmptestdir = TmpTestdir(request) return tmptestdir rex_outcome = re.compile("(\d+) (\w+)") class RunResult: def __init__(self, ret, outlines, errlines, duration): self.ret = ret self.outlines = outlines self.errlines = errlines self.stdout = LineMatcher(outlines) self.stderr = LineMatcher(errlines) self.duration = duration def parseoutcomes(self): for line in reversed(self.outlines): if 'seconds' in line: outcomes = rex_outcome.findall(line) if outcomes: d = {} for num, cat in 
outcomes: d[cat] = int(num) return d class TmpTestdir: def __init__(self, request): self.request = request self.Config = request.config.__class__ self._pytest = request.getfuncargvalue("_pytest") # XXX remove duplication with tmpdir plugin basetmp = request.config._tmpdirhandler.ensuretemp("testdir") name = request.function.__name__ for i in range(100): try: tmpdir = basetmp.mkdir(name + str(i)) except py.error.EEXIST: continue break self.tmpdir = tmpdir self.plugins = [] self._syspathremove = [] self.chdir() # always chdir self.request.addfinalizer(self.finalize) def __repr__(self): return "" % (self.tmpdir,) def finalize(self): for p in self._syspathremove: py.std.sys.path.remove(p) if hasattr(self, '_olddir'): self._olddir.chdir() # delete modules that have been loaded from tmpdir for name, mod in list(sys.modules.items()): if mod: fn = getattr(mod, '__file__', None) if fn and fn.startswith(str(self.tmpdir)): del sys.modules[name] def getreportrecorder(self, obj): if hasattr(obj, 'config'): obj = obj.config if hasattr(obj, 'hook'): obj = obj.hook assert hasattr(obj, '_hookspecs'), obj reprec = ReportRecorder(obj) reprec.hookrecorder = self._pytest.gethookrecorder(obj) reprec.hook = reprec.hookrecorder.hook return reprec def chdir(self): old = self.tmpdir.chdir() if not hasattr(self, '_olddir'): self._olddir = old def _makefile(self, ext, args, kwargs): items = list(kwargs.items()) if args: source = py.builtin._totext("\n").join( map(py.builtin._totext, args)) + py.builtin._totext("\n") basename = self.request.function.__name__ items.insert(0, (basename, source)) ret = None for name, value in items: p = self.tmpdir.join(name).new(ext=ext) source = py.builtin._totext(py.code.Source(value)).strip() content = source.encode("utf-8") # + "\n" #content = content.rstrip() + "\n" p.write(content, "wb") if ret is None: ret = p return ret def makefile(self, ext, *args, **kwargs): return self._makefile(ext, args, kwargs) def makeconftest(self, source): return 
self.makepyfile(conftest=source) def makeini(self, source): return self.makefile('.ini', tox=source) def getinicfg(self, source): p = self.makeini(source) return py.iniconfig.IniConfig(p)['pytest'] def makepyfile(self, *args, **kwargs): return self._makefile('.py', args, kwargs) def maketxtfile(self, *args, **kwargs): return self._makefile('.txt', args, kwargs) def syspathinsert(self, path=None): if path is None: path = self.tmpdir py.std.sys.path.insert(0, str(path)) self._syspathremove.append(str(path)) def mkdir(self, name): return self.tmpdir.mkdir(name) def mkpydir(self, name): p = self.mkdir(name) p.ensure("__init__.py") return p Session = Session def getnode(self, config, arg): session = Session(config) assert '::' not in str(arg) p = py.path.local(arg) x = session.fspath.bestrelpath(p) config.hook.pytest_sessionstart(session=session) res = session.perform_collect([x], genitems=False)[0] config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) return res def getpathnode(self, path): config = self.parseconfigure(path) session = Session(config) x = session.fspath.bestrelpath(path) config.hook.pytest_sessionstart(session=session) res = session.perform_collect([x], genitems=False)[0] config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) return res def genitems(self, colitems): session = colitems[0].session result = [] for colitem in colitems: result.extend(session.genitems(colitem)) return result def runitem(self, source): # used from runner functional tests item = self.getitem(source) # the test class where we are called from wants to provide the runner testclassinstance = self.request.instance runner = testclassinstance.getrunner() return runner(item) def inline_runsource(self, source, *cmdlineargs): p = self.makepyfile(source) l = list(cmdlineargs) + [p] return self.inline_run(*l) def inline_runsource1(self, *args): args = list(args) source = args.pop() p = self.makepyfile(source) l = list(args) + [p] reprec = self.inline_run(*l) 
reports = reprec.getreports("pytest_runtest_logreport") assert len(reports) == 3, reports # setup/call/teardown return reports[1] def inline_genitems(self, *args): return self.inprocess_run(list(args) + ['--collectonly']) def inline_run(self, *args): items, rec = self.inprocess_run(args) return rec def inprocess_run(self, args, plugins=None): rec = [] items = [] class Collect: def pytest_configure(x, config): rec.append(self.getreportrecorder(config)) def pytest_itemcollected(self, item): items.append(item) if not plugins: plugins = [] plugins.append(Collect()) ret = pytest.main(list(args), plugins=plugins) reprec = rec[0] reprec.ret = ret assert len(rec) == 1 return items, reprec def parseconfig(self, *args): args = [str(x) for x in args] for x in args: if str(x).startswith('--basetemp'): break else: args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp')) import _pytest.config config = _pytest.config._prepareconfig(args, self.plugins) # we don't know what the test will do with this half-setup config # object and thus we make sure it gets unconfigured properly in any # case (otherwise capturing could still be active, for example) def ensure_unconfigure(): if hasattr(config.pluginmanager, "_config"): config.pluginmanager.do_unconfigure(config) config.pluginmanager.ensure_shutdown() self.request.addfinalizer(ensure_unconfigure) return config def parseconfigure(self, *args): config = self.parseconfig(*args) config.do_configure() self.request.addfinalizer(lambda: config.do_unconfigure()) return config def getitem(self, source, funcname="test_func"): items = self.getitems(source) for item in items: if item.name == funcname: return item assert 0, "%r item not found in module:\n%s\nitems: %s" %( funcname, source, items) def getitems(self, source): modcol = self.getmodulecol(source) return self.genitems([modcol]) def getmodulecol(self, source, configargs=(), withinit=False): kw = {self.request.function.__name__: py.code.Source(source).strip()} path = 
self.makepyfile(**kw) if withinit: self.makepyfile(__init__ = "#") self.config = config = self.parseconfigure(path, *configargs) node = self.getnode(config, path) return node def collect_by_name(self, modcol, name): for colitem in modcol._memocollect(): if colitem.name == name: return colitem def popen(self, cmdargs, stdout, stderr, **kw): env = os.environ.copy() env['PYTHONPATH'] = os.pathsep.join(filter(None, [ str(os.getcwd()), env.get('PYTHONPATH', '')])) kw['env'] = env #print "env", env return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) def run(self, *cmdargs): return self._run(*cmdargs) def _run(self, *cmdargs): cmdargs = [str(x) for x in cmdargs] p1 = self.tmpdir.join("stdout") p2 = self.tmpdir.join("stderr") print_("running", cmdargs, "curdir=", py.path.local()) f1 = codecs.open(str(p1), "w", encoding="utf8") f2 = codecs.open(str(p2), "w", encoding="utf8") try: now = time.time() popen = self.popen(cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32")) ret = popen.wait() finally: f1.close() f2.close() f1 = codecs.open(str(p1), "r", encoding="utf8") f2 = codecs.open(str(p2), "r", encoding="utf8") try: out = f1.read().splitlines() err = f2.read().splitlines() finally: f1.close() f2.close() self._dump_lines(out, sys.stdout) self._dump_lines(err, sys.stderr) return RunResult(ret, out, err, time.time()-now) def _dump_lines(self, lines, fp): try: for line in lines: py.builtin.print_(line, file=fp) except UnicodeEncodeError: print("couldn't print to %s because of encoding" % (fp,)) def runpybin(self, scriptname, *args): fullargs = self._getpybinargs(scriptname) + args return self.run(*fullargs) def _getpybinargs(self, scriptname): if not self.request.config.getvalue("notoolsonpath"): # XXX we rely on script refering to the correct environment # we cannot use "(py.std.sys.executable,script)" # becaue on windows the script is e.g. 
a py.test.exe return (py.std.sys.executable, _pytest_fullpath,) # noqa else: py.test.skip("cannot run %r with --no-tools-on-path" % scriptname) def runpython(self, script, prepend=True): if prepend: s = self._getsysprepend() if s: script.write(s + "\n" + script.read()) return self.run(sys.executable, script) def _getsysprepend(self): if self.request.config.getvalue("notoolsonpath"): s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath()) else: s = "" return s def runpython_c(self, command): command = self._getsysprepend() + command return self.run(py.std.sys.executable, "-c", command) def runpytest(self, *args): p = py.path.local.make_numbered_dir(prefix="runpytest-", keep=None, rootdir=self.tmpdir) args = ('--basetemp=%s' % p, ) + args #for x in args: # if '--confcutdir' in str(x): # break #else: # pass # args = ('--confcutdir=.',) + args plugins = [x for x in self.plugins if isinstance(x, str)] if plugins: args = ('-p', plugins[0]) + args return self.runpybin("py.test", *args) def spawn_pytest(self, string, expect_timeout=10.0): if self.request.config.getvalue("notoolsonpath"): py.test.skip("--no-tools-on-path prevents running pexpect-spawn tests") basetemp = self.tmpdir.mkdir("pexpect") invoke = " ".join(map(str, self._getpybinargs("py.test"))) cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) return self.spawn(cmd, expect_timeout=expect_timeout) def spawn(self, cmd, expect_timeout=10.0): pexpect = py.test.importorskip("pexpect", "3.0") if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine(): pytest.skip("pypy-64 bit not supported") if sys.platform == "darwin": pytest.xfail("pexpect does not work reliably on darwin?!") if sys.platform.startswith("freebsd"): pytest.xfail("pexpect does not work reliably on freebsd") logfile = self.tmpdir.join("spawn.out").open("wb") child = pexpect.spawn(cmd, logfile=logfile) self.request.addfinalizer(logfile.close) child.timeout = expect_timeout return child def getdecoded(out): try: return 
out.decode("utf-8") except UnicodeDecodeError: return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( py.io.saferepr(out),) class ReportRecorder(object): def __init__(self, hook): self.hook = hook self.pluginmanager = hook._pm self.pluginmanager.register(self) def getcall(self, name): return self.hookrecorder.getcall(name) def popcall(self, name): return self.hookrecorder.popcall(name) def getcalls(self, names): """ return list of ParsedCall instances matching the given eventname. """ return self.hookrecorder.getcalls(names) # functionality for test reports def getreports(self, names="pytest_runtest_logreport pytest_collectreport"): return [x.report for x in self.getcalls(names)] def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport", when=None): """ return a testreport whose dotted import path matches """ l = [] for rep in self.getreports(names=names): try: if not when and rep.when != "call" and rep.passed: # setup/teardown passing reports - let's ignore those continue except AttributeError: pass if when and getattr(rep, 'when', None) != when: continue if not inamepart or inamepart in rep.nodeid.split("::"): l.append(rep) if not l: raise ValueError("could not find test report matching %r: no test reports at all!" 
% (inamepart,)) if len(l) > 1: raise ValueError("found more than one testreport matching %r: %s" %( inamepart, l)) return l[0] def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'): return [rep for rep in self.getreports(names) if rep.failed] def getfailedcollections(self): return self.getfailures('pytest_collectreport') def listoutcomes(self): passed = [] skipped = [] failed = [] for rep in self.getreports( "pytest_collectreport pytest_runtest_logreport"): if rep.passed: if getattr(rep, "when", None) == "call": passed.append(rep) elif rep.skipped: skipped.append(rep) elif rep.failed: failed.append(rep) return passed, skipped, failed def countoutcomes(self): return [len(x) for x in self.listoutcomes()] def assertoutcome(self, passed=0, skipped=0, failed=0): realpassed, realskipped, realfailed = self.listoutcomes() assert passed == len(realpassed) assert skipped == len(realskipped) assert failed == len(realfailed) def clear(self): self.hookrecorder.calls[:] = [] def unregister(self): self.pluginmanager.unregister(self) self.hookrecorder.finish_recording() class LineComp: def __init__(self): self.stringio = py.io.TextIO() def assert_contains_lines(self, lines2): """ assert that lines2 are contained (linearly) in lines1. return a list of extralines found. 
""" __tracebackhide__ = True val = self.stringio.getvalue() self.stringio.truncate(0) self.stringio.seek(0) lines1 = val.split("\n") return LineMatcher(lines1).fnmatch_lines(lines2) class LineMatcher: def __init__(self, lines): self.lines = lines def str(self): return "\n".join(self.lines) def _getlines(self, lines2): if isinstance(lines2, str): lines2 = py.code.Source(lines2) if isinstance(lines2, py.code.Source): lines2 = lines2.strip().lines return lines2 def fnmatch_lines_random(self, lines2): lines2 = self._getlines(lines2) for line in lines2: for x in self.lines: if line == x or fnmatch(x, line): print_("matched: ", repr(line)) break else: raise ValueError("line %r not found in output" % line) def get_lines_after(self, fnline): for i, line in enumerate(self.lines): if fnline == line or fnmatch(line, fnline): return self.lines[i+1:] raise ValueError("line %r not found in output" % fnline) def fnmatch_lines(self, lines2): def show(arg1, arg2): py.builtin.print_(arg1, arg2, file=py.std.sys.stderr) lines2 = self._getlines(lines2) lines1 = self.lines[:] nextline = None extralines = [] __tracebackhide__ = True for line in lines2: nomatchprinted = False while lines1: nextline = lines1.pop(0) if line == nextline: show("exact match:", repr(line)) break elif fnmatch(nextline, line): show("fnmatch:", repr(line)) show(" with:", repr(nextline)) break else: if not nomatchprinted: show("nomatch:", repr(line)) nomatchprinted = True show(" and:", repr(nextline)) extralines.append(nextline) else: py.test.fail("remains unmatched: %r, see stderr" % (line,)) pytest-2.5.1/_pytest/resultlog.py0000664000175000017500000000650012254002202016506 0ustar hpkhpk00000000000000""" log machine-parseable test session result information in a plain text file. 
""" import py def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "resultlog plugin options") group.addoption('--resultlog', '--result-log', action="store", metavar="path", default=None, help="path for machine-readable result log.") def pytest_configure(config): resultlog = config.option.resultlog # prevent opening resultlog on slave nodes (xdist) if resultlog and not hasattr(config, 'slaveinput'): logfile = open(resultlog, 'w', 1) # line buffered config._resultlog = ResultLog(config, logfile) config.pluginmanager.register(config._resultlog) def pytest_unconfigure(config): resultlog = getattr(config, '_resultlog', None) if resultlog: resultlog.logfile.close() del config._resultlog config.pluginmanager.unregister(resultlog) def generic_path(item): chain = item.listchain() gpath = [chain[0].name] fspath = chain[0].fspath fspart = False for node in chain[1:]: newfspath = node.fspath if newfspath == fspath: if fspart: gpath.append(':') fspart = False else: gpath.append('.') else: gpath.append('/') fspart = True name = node.name if name[0] in '([': gpath.pop() gpath.append(name) fspath = newfspath return ''.join(gpath) class ResultLog(object): def __init__(self, config, logfile): self.config = config self.logfile = logfile # preferably line buffered def write_log_entry(self, testpath, lettercode, longrepr): py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) for line in longrepr.splitlines(): py.builtin.print_(" %s" % line, file=self.logfile) def log_outcome(self, report, lettercode, longrepr): testpath = getattr(report, 'nodeid', None) if testpath is None: testpath = report.fspath self.write_log_entry(testpath, lettercode, longrepr) def pytest_runtest_logreport(self, report): if report.when != "call" and report.passed: return res = self.config.hook.pytest_report_teststatus(report=report) code = res[1] if code == 'x': longrepr = str(report.longrepr) elif code == 'X': longrepr = '' elif report.passed: longrepr = "" elif 
report.failed: longrepr = str(report.longrepr) elif report.skipped: longrepr = str(report.longrepr[2]) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): if not report.passed: if report.failed: code = "F" longrepr = str(report.longrepr) else: assert report.skipped code = "S" longrepr = "%s:%d: %s" % report.longrepr self.log_outcome(report, code, longrepr) def pytest_internalerror(self, excrepr): reprcrash = getattr(excrepr, 'reprcrash', None) path = getattr(reprcrash, "path", None) if path is None: path = "cwd:%s" % py.path.local() self.write_log_entry(path, '!', str(excrepr)) pytest-2.5.1/_pytest/hookspec.py0000664000175000017500000002336612254002202016312 0ustar hpkhpk00000000000000""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ # ------------------------------------------------------------------------- # Initialization # ------------------------------------------------------------------------- def pytest_addhooks(pluginmanager): """called at plugin load time to allow adding new hooks via a call to pluginmanager.registerhooks(module).""" def pytest_namespace(): """return dict of name->object to be made globally available in the py.test/pytest namespace. This hook is called before command line options are parsed. """ def pytest_cmdline_parse(pluginmanager, args): """return initialized config object, parsing the specified args. """ pytest_cmdline_parse.firstresult = True def pytest_cmdline_preparse(config, args): """(deprecated) modify command line arguments before option parsing. """ def pytest_addoption(parser): """register argparse-style options and ini-style config values. This function must be implemented in a :ref:`plugin ` and is called once at the beginning of a test run. :arg parser: To add command line options, call :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`. To add ini-file values call :py:func:`parser.addini(...) <_pytest.config.Parser.addini>`. 
Options can later be accessed through the :py:class:`config <_pytest.config.Config>` object, respectively: - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to retrieve the value of a command line option. - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve a value read from an ini-style file. The config object is passed around on many internal objects via the ``.config`` attribute or can be retrieved as the ``pytestconfig`` fixture or accessed via (deprecated) ``pytest.config``. """ def pytest_cmdline_main(config): """ called for performing the main command line action. The default implementation will invoke the configure hooks and runtest_mainloop. """ pytest_cmdline_main.firstresult = True def pytest_load_initial_conftests(args, early_config, parser): """ implements loading initial conftests. """ def pytest_configure(config): """ called after command line options have been parsed and all plugins and initial conftest files been loaded. """ def pytest_unconfigure(config): """ called before test process is exited. """ def pytest_runtestloop(session): """ called for performing the main runtest loop (after collection finished). """ pytest_runtestloop.firstresult = True # ------------------------------------------------------------------------- # collection hooks # ------------------------------------------------------------------------- def pytest_collection(session): """ perform the collection protocol for the given session. """ pytest_collection.firstresult = True def pytest_collection_modifyitems(session, config, items): """ called after collection has been performed, may filter or re-order the items in-place.""" def pytest_collection_finish(session): """ called after collection has been performed and modified. """ def pytest_ignore_collect(path, config): """ return True to prevent considering this path for collection. This hook is consulted for all files and directories prior to calling more specific hooks. 
""" pytest_ignore_collect.firstresult = True def pytest_collect_directory(path, parent): """ called before traversing a directory for collection files. """ pytest_collect_directory.firstresult = True def pytest_collect_file(path, parent): """ return collection Node or None for the given path. Any new node needs to have the specified ``parent`` as a parent.""" # logging hooks for collection def pytest_collectstart(collector): """ collector starts collecting. """ def pytest_itemcollected(item): """ we just collected a test item. """ def pytest_collectreport(report): """ collector finished collecting. """ def pytest_deselected(items): """ called for test items deselected by keyword. """ def pytest_make_collect_report(collector): """ perform ``collector.collect()`` and return a CollectReport. """ pytest_make_collect_report.firstresult = True # ------------------------------------------------------------------------- # Python test function related hooks # ------------------------------------------------------------------------- def pytest_pycollect_makemodule(path, parent): """ return a Module collector or None for the given path. This hook will be called for each matching test module path. The pytest_collect_file hook needs to be used if you want to create test modules for files that do not match as a test module. """ pytest_pycollect_makemodule.firstresult = True def pytest_pycollect_makeitem(collector, name, obj): """ return custom item/collector for a python object in a module, or None. """ pytest_pycollect_makeitem.firstresult = True def pytest_pyfunc_call(pyfuncitem): """ call underlying test function. 
""" pytest_pyfunc_call.firstresult = True def pytest_generate_tests(metafunc): """ generate (multiple) parametrized calls to a test function.""" # ------------------------------------------------------------------------- # generic runtest related hooks # ------------------------------------------------------------------------- def pytest_itemstart(item, node=None): """ (deprecated, use pytest_runtest_logstart). """ def pytest_runtest_protocol(item, nextitem): """ implements the runtest_setup/call/teardown protocol for the given test item, including capturing exceptions and calling reporting hooks. :arg item: test item for which the runtest protocol is performed. :arg nexitem: the scheduled-to-be-next test item (or None if this is the end my friend). This argument is passed on to :py:func:`pytest_runtest_teardown`. :return boolean: True if no further hook implementations should be invoked. """ pytest_runtest_protocol.firstresult = True def pytest_runtest_logstart(nodeid, location): """ signal the start of running a single test item. """ def pytest_runtest_setup(item): """ called before ``pytest_runtest_call(item)``. """ def pytest_runtest_call(item): """ called to execute the test ``item``. """ def pytest_runtest_teardown(item, nextitem): """ called after ``pytest_runtest_call``. :arg nexitem: the scheduled-to-be-next test item (None if no further test item is scheduled). This argument can be used to perform exact teardowns, i.e. calling just enough finalizers so that nextitem only needs to call setup-functions. """ def pytest_runtest_makereport(item, call): """ return a :py:class:`_pytest.runner.TestReport` object for the given :py:class:`pytest.Item` and :py:class:`_pytest.runner.CallInfo`. """ pytest_runtest_makereport.firstresult = True def pytest_runtest_logreport(report): """ process a test setup/call/teardown report relating to the respective phase of executing a test. 
""" # ------------------------------------------------------------------------- # test session related hooks # ------------------------------------------------------------------------- def pytest_sessionstart(session): """ before session.main() is called. """ def pytest_sessionfinish(session, exitstatus): """ whole test run finishes. """ # ------------------------------------------------------------------------- # hooks for customising the assert methods # ------------------------------------------------------------------------- def pytest_assertrepr_compare(config, op, left, right): """return explanation for comparisons in failing assert expressions. Return None for no custom explanation, otherwise return a list of strings. The strings will be joined by newlines but any newlines *in* a string will be escaped. Note that all but the first line will be indented sligthly, the intention is for the first line to be a summary. """ # ------------------------------------------------------------------------- # hooks for influencing reporting (invoked from _pytest_terminal) # ------------------------------------------------------------------------- def pytest_report_header(config, startdir): """ return a string to be displayed as header info for terminal reporting.""" def pytest_report_teststatus(report): """ return result-category, shortletter and verbose word for reporting.""" pytest_report_teststatus.firstresult = True def pytest_terminal_summary(terminalreporter): """ add additional section in terminal summary reporting. 
""" # ------------------------------------------------------------------------- # doctest hooks # ------------------------------------------------------------------------- def pytest_doctest_prepare_content(content): """ return processed content for a given doctest""" pytest_doctest_prepare_content.firstresult = True # ------------------------------------------------------------------------- # error handling and internal debugging hooks # ------------------------------------------------------------------------- def pytest_plugin_registered(plugin, manager): """ a new pytest plugin got registered. """ def pytest_internalerror(excrepr, excinfo): """ called for internal errors. """ def pytest_keyboard_interrupt(excinfo): """ called for keyboard interrupt. """ def pytest_exception_interact(node, call, report): """ (experimental, new in 2.4) called when an exception was raised which can potentially be interactively handled. This hook is only called if an exception was raised that is not an internal exception like "skip.Exception". """ pytest-2.5.1/_pytest/capture.py0000664000175000017500000002024312254002202016131 0ustar hpkhpk00000000000000""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. 
""" import pytest, py import sys import os def pytest_addoption(parser): group = parser.getgroup("general") group._addoption('--capture', action="store", default=None, metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") group._addoption('-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") @pytest.mark.tryfirst def pytest_load_initial_conftests(early_config, parser, args, __multicall__): ns = parser.parse_known_args(args) method = ns.capture if not method: method = "fd" if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) early_config.pluginmanager.register(capman, "capturemanager") # make sure that capturemanager is properly reset at final shutdown def teardown(): try: capman.reset_capturings() except ValueError: pass early_config.pluginmanager.add_shutdown(teardown) # make sure logging does not raise exceptions at the end def silence_logging_at_shutdown(): if "logging" in sys.modules: sys.modules["logging"].raiseExceptions = False early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) # finally trigger conftest loading but while capturing (issue93) capman.resumecapture() try: try: return __multicall__.execute() finally: out, err = capman.suspendcapture() except: sys.stdout.write(out) sys.stderr.write(err) raise def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) class NoCapture: def startall(self): pass def resume(self): pass def reset(self): pass def suspend(self): return "", "" class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} self._defaultmethod = defaultmethod def _maketempfile(self): f = py.std.tempfile.TemporaryFile() newf = py.io.dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): return py.io.TextIO() def _getcapture(self, method): if method == "fd": 
return py.io.StdCaptureFD(now=False, out=self._maketempfile(), err=self._maketempfile() ) elif method == "sys": return py.io.StdCapture(now=False, out=self._makestringio(), err=self._makestringio() ) elif method == "no": return NoCapture() else: raise ValueError("unknown capturing method: %r" % method) def _getmethod(self, config, fspath): if config.option.capture: method = config.option.capture else: try: method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): for name, cap in self._method2capture.items(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): raise ValueError("cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod cap = self._method2capture.get(method) self._capturing = method if cap is None: self._method2capture[method] = cap = self._getcapture(method) cap.startall() else: cap.resume() def suspendcapture(self, item=None): self.deactivate_funcargs() if hasattr(self, '_capturing'): method = self._capturing cap = self._method2capture.get(method) if cap is not None: outerr = cap.suspend() del self._capturing if item: outerr = (item.outerr[0] + outerr[0], item.outerr[1] + outerr[1]) return outerr if hasattr(item, 'outerr'): return item.outerr return "", "" def activate_funcargs(self, pyfuncitem): funcargs = getattr(pyfuncitem, "funcargs", None) if funcargs is not None: for name, capfuncarg in funcargs.items(): if name in ('capsys', 'capfd'): assert not hasattr(self, '_capturing_funcarg') self._capturing_funcarg = capfuncarg capfuncarg._start() def deactivate_funcargs(self): capturing_funcarg = getattr(self, 
'_capturing_funcarg', None) if capturing_funcarg: outerr = capturing_funcarg._finalize() del self._capturing_funcarg return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: self.resumecapture(method) except ValueError: return # recursive collect, XXX refactor capturing # to allow for more lightweight recursive capturing try: rep = __multicall__.execute() finally: outerr = self.suspendcapture() addouterr(rep, outerr) return rep @pytest.mark.tryfirst def pytest_runtest_setup(self, item): self.resumecapture_item(item) @pytest.mark.tryfirst def pytest_runtest_call(self, item): self.resumecapture_item(item) self.activate_funcargs(item) @pytest.mark.tryfirst def pytest_runtest_teardown(self, item): self.resumecapture_item(item) def pytest_keyboard_interrupt(self, excinfo): if hasattr(self, '_capturing'): self.suspendcapture() @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) if funcarg_outerr is not None: outerr = (outerr[0] + funcarg_outerr[0], outerr[1] + funcarg_outerr[1]) addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep error_capsysfderror = "cannot use capsys and capfd at the same time" def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ if "capfd" in request._funcargs: raise request.raiseerror(error_capsysfderror) return CaptureFixture(py.io.StdCapture) def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" if "capsys" in request._funcargs: request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): pytest.skip("capfd funcarg needs os.dup") return CaptureFixture(py.io.StdCaptureFD) class CaptureFixture: def __init__(self, captureclass): self.capture = captureclass(now=False) def _start(self): self.capture.startall() def _finalize(self): if hasattr(self, 'capture'): outerr = self._outerr = self.capture.reset() del self.capture return outerr def readouterr(self): try: return self.capture.readouterr() except AttributeError: return self._outerr def close(self): self._finalize() pytest-2.5.1/_pytest/assertion/0000775000175000017500000000000012254002202016122 5ustar hpkhpk00000000000000pytest-2.5.1/_pytest/assertion/newinterpret.py0000664000175000017500000002774212254002202021236 0ustar hpkhpk00000000000000""" Find intermediate evalutation results in assert statements through builtin AST. This should replace oldinterpret.py eventually. """ import sys import ast import py from _pytest.assertion import util from _pytest.assertion.reinterpret import BuiltinAssertionError if sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", "Repr", "Num", "Str", "Attribute", "Subscript", "Name", "List", "Tuple") _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign", "AugAssign", "Print", "For", "While", "If", "With", "Raise", "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom", "Exec", "Global", "Expr", "Pass", "Break", "Continue") _expr_nodes = set(getattr(ast, name) for name in _exprs) _stmt_nodes = set(getattr(ast, name) for name in _stmts) def _is_ast_expr(node): return node.__class__ in _expr_nodes def _is_ast_stmt(node): return node.__class__ in _stmt_nodes else: def _is_ast_expr(node): return isinstance(node, ast.expr) def _is_ast_stmt(node): return isinstance(node, ast.stmt) class Failure(Exception): """Error 
found while interpreting AST.""" def __init__(self, explanation=""): self.cause = sys.exc_info() self.explanation = explanation def interpret(source, frame, should_fail=False): mod = ast.parse(source) visitor = DebugInterpreter(frame) try: visitor.visit(mod) except Failure: failure = sys.exc_info()[1] return getfailure(failure) if should_fail: return ("(assertion failed, but when it was re-run for " "printing intermediate values, it did not fail. Suggestions: " "compute assert expression before the assert or use --assert=plain)") def run(offending_line, frame=None): if frame is None: frame = py.code.Frame(sys._getframe(1)) return interpret(offending_line, frame) def getfailure(e): explanation = util.format_explanation(e.explanation) value = e.cause[1] if str(value): lines = explanation.split('\n') lines[0] += " << %s" % (value,) explanation = '\n'.join(lines) text = "%s: %s" % (e.cause[0].__name__, explanation) if text.startswith('AssertionError: assert '): text = text[16:] return text operator_map = { ast.BitOr : "|", ast.BitXor : "^", ast.BitAnd : "&", ast.LShift : "<<", ast.RShift : ">>", ast.Add : "+", ast.Sub : "-", ast.Mult : "*", ast.Div : "/", ast.FloorDiv : "//", ast.Mod : "%", ast.Eq : "==", ast.NotEq : "!=", ast.Lt : "<", ast.LtE : "<=", ast.Gt : ">", ast.GtE : ">=", ast.Pow : "**", ast.Is : "is", ast.IsNot : "is not", ast.In : "in", ast.NotIn : "not in" } unary_map = { ast.Not : "not %s", ast.Invert : "~%s", ast.USub : "-%s", ast.UAdd : "+%s" } class DebugInterpreter(ast.NodeVisitor): """Interpret AST nodes to gleam useful debugging information. """ def __init__(self, frame): self.frame = frame def generic_visit(self, node): # Fallback when we don't have a special implementation. 
if _is_ast_expr(node): mod = ast.Expression(node) co = self._compile(mod) try: result = self.frame.eval(co) except Exception: raise Failure() explanation = self.frame.repr(result) return explanation, result elif _is_ast_stmt(node): mod = ast.Module([node]) co = self._compile(mod, "exec") try: self.frame.exec_(co) except Exception: raise Failure() return None, None else: raise AssertionError("can't handle %s" %(node,)) def _compile(self, source, mode="eval"): return compile(source, "", mode) def visit_Expr(self, expr): return self.visit(expr.value) def visit_Module(self, mod): for stmt in mod.body: self.visit(stmt) def visit_Name(self, name): explanation, result = self.generic_visit(name) # See if the name is local. source = "%r in locals() is not globals()" % (name.id,) co = self._compile(source) try: local = self.frame.eval(co) except Exception: # have to assume it isn't local = None if local is None or not self.frame.is_true(local): return name.id, result return explanation, result def visit_Compare(self, comp): left = comp.left left_explanation, left_result = self.visit(left) for op, next_op in zip(comp.ops, comp.comparators): next_explanation, next_result = self.visit(next_op) op_symbol = operator_map[op.__class__] explanation = "%s %s %s" % (left_explanation, op_symbol, next_explanation) source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,) co = self._compile(source) try: result = self.frame.eval(co, __exprinfo_left=left_result, __exprinfo_right=next_result) except Exception: raise Failure(explanation) try: if not self.frame.is_true(result): break except KeyboardInterrupt: raise except: break left_explanation, left_result = next_explanation, next_result if util._reprcompare is not None: res = util._reprcompare(op_symbol, left_result, next_result) if res: explanation = res return explanation, result def visit_BoolOp(self, boolop): is_or = isinstance(boolop.op, ast.Or) explanations = [] for operand in boolop.values: explanation, result = 
self.visit(operand) explanations.append(explanation) if result == is_or: break name = is_or and " or " or " and " explanation = "(" + name.join(explanations) + ")" return explanation, result def visit_UnaryOp(self, unary): pattern = unary_map[unary.op.__class__] operand_explanation, operand_result = self.visit(unary.operand) explanation = pattern % (operand_explanation,) co = self._compile(pattern % ("__exprinfo_expr",)) try: result = self.frame.eval(co, __exprinfo_expr=operand_result) except Exception: raise Failure(explanation) return explanation, result def visit_BinOp(self, binop): left_explanation, left_result = self.visit(binop.left) right_explanation, right_result = self.visit(binop.right) symbol = operator_map[binop.op.__class__] explanation = "(%s %s %s)" % (left_explanation, symbol, right_explanation) source = "__exprinfo_left %s __exprinfo_right" % (symbol,) co = self._compile(source) try: result = self.frame.eval(co, __exprinfo_left=left_result, __exprinfo_right=right_result) except Exception: raise Failure(explanation) return explanation, result def visit_Call(self, call): func_explanation, func = self.visit(call.func) arg_explanations = [] ns = {"__exprinfo_func" : func} arguments = [] for arg in call.args: arg_explanation, arg_result = self.visit(arg) arg_name = "__exprinfo_%s" % (len(ns),) ns[arg_name] = arg_result arguments.append(arg_name) arg_explanations.append(arg_explanation) for keyword in call.keywords: arg_explanation, arg_result = self.visit(keyword.value) arg_name = "__exprinfo_%s" % (len(ns),) ns[arg_name] = arg_result keyword_source = "%s=%%s" % (keyword.arg) arguments.append(keyword_source % (arg_name,)) arg_explanations.append(keyword_source % (arg_explanation,)) if call.starargs: arg_explanation, arg_result = self.visit(call.starargs) arg_name = "__exprinfo_star" ns[arg_name] = arg_result arguments.append("*%s" % (arg_name,)) arg_explanations.append("*%s" % (arg_explanation,)) if call.kwargs: arg_explanation, arg_result = 
self.visit(call.kwargs) arg_name = "__exprinfo_kwds" ns[arg_name] = arg_result arguments.append("**%s" % (arg_name,)) arg_explanations.append("**%s" % (arg_explanation,)) args_explained = ", ".join(arg_explanations) explanation = "%s(%s)" % (func_explanation, args_explained) args = ", ".join(arguments) source = "__exprinfo_func(%s)" % (args,) co = self._compile(source) try: result = self.frame.eval(co, **ns) except Exception: raise Failure(explanation) pattern = "%s\n{%s = %s\n}" rep = self.frame.repr(result) explanation = pattern % (rep, rep, explanation) return explanation, result def _is_builtin_name(self, name): pattern = "%r not in globals() and %r not in locals()" source = pattern % (name.id, name.id) co = self._compile(source) try: return self.frame.eval(co) except Exception: return False def visit_Attribute(self, attr): if not isinstance(attr.ctx, ast.Load): return self.generic_visit(attr) source_explanation, source_result = self.visit(attr.value) explanation = "%s.%s" % (source_explanation, attr.attr) source = "__exprinfo_expr.%s" % (attr.attr,) co = self._compile(source) try: result = self.frame.eval(co, __exprinfo_expr=source_result) except Exception: raise Failure(explanation) explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result), self.frame.repr(result), source_explanation, attr.attr) # Check if the attr is from an instance. 
source = "%r in getattr(__exprinfo_expr, '__dict__', {})" source = source % (attr.attr,) co = self._compile(source) try: from_instance = self.frame.eval(co, __exprinfo_expr=source_result) except Exception: from_instance = None if from_instance is None or self.frame.is_true(from_instance): rep = self.frame.repr(result) pattern = "%s\n{%s = %s\n}" explanation = pattern % (rep, rep, explanation) return explanation, result def visit_Assert(self, assrt): test_explanation, test_result = self.visit(assrt.test) explanation = "assert %s" % (test_explanation,) if not self.frame.is_true(test_result): try: raise BuiltinAssertionError except Exception: raise Failure(explanation) return explanation, test_result def visit_Assign(self, assign): value_explanation, value_result = self.visit(assign.value) explanation = "... = %s" % (value_explanation,) name = ast.Name("__exprinfo_expr", ast.Load(), lineno=assign.value.lineno, col_offset=assign.value.col_offset) new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno, col_offset=assign.col_offset) mod = ast.Module([new_assign]) co = self._compile(mod, "exec") try: self.frame.exec_(co, __exprinfo_expr=value_result) except Exception: raise Failure(explanation) return explanation, value_result pytest-2.5.1/_pytest/assertion/rewrite.py0000664000175000017500000006151412254002202020164 0ustar hpkhpk00000000000000"""Rewrite assertion AST to produce nice error messages""" import ast import errno import itertools import imp import marshal import os import re import struct import sys import types import py from _pytest.assertion import util # py.test caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: if hasattr(sys, "pypy_version_info"): impl = "pypy" elif sys.platform == "java": impl = "jython" else: impl = "cpython" ver = sys.version_info PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") self.session = session def find_module(self, name, path=None): if self.session is None: return None sess = self.session state = sess.config._assertstate state.trace("find_module called for: %s" % name) names = name.rsplit(".", 1) lastname = names[-1] pth = None if path is not None: # Starting with Python 3.3, path is a _NamespacePath(), which # causes problems if not converted to list. path = list(path) if len(path) == 1: pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) except ImportError: return None if fd is not None: fd.close() tp = desc[2] if tp == imp.PY_COMPILED: if hasattr(imp, "source_from_cache"): fn = imp.source_from_cache(fn) else: fn = fn[:-1] elif tp != imp.PY_SOURCE: # Don't know what this is. return None else: fn = os.path.join(pth, name.rpartition(".")[2] + ".py") fn_pypath = py.path.local(fn) # Is this a test file? if not sess.isinitpath(fn): # We have to be very careful here because imports in this code can # trigger a cycle. self.session = None try: for pat in self.fnpats: if fn_pypath.fnmatch(pat): state.trace("matched test file %r" % (fn,)) break else: return None finally: self.session = sess else: state.trace("matched test file (was specified on cmdline): %r" % (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. 
We must be aware of the possibility of # concurrent py.test processes rewriting and loading pycs. To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. write = not sys.dont_write_bytecode cache_dir = os.path.join(fn_pypath.dirname, "__pycache__") if write: try: os.mkdir(cache_dir) except OSError: e = sys.exc_info()[1].errno if e == errno.EEXIST: # Either the __pycache__ directory already exists (the # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. write = False elif e == errno.EACCES: state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) # Notice that even if we're in a read-only directory, I'm going # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) co = _rewrite_test(state, fn_pypath) if co is None: # Probably a SyntaxError in the test. return None if write: _make_rewritten_pyc(state, fn_pypath, pyc, co) else: state.trace("found cached rewritten pyc for %r" % (fn,)) self.modules[name] = co, pyc return self def load_module(self, name): co, pyc = self.modules.pop(name) # I wish I could just call imp.load_compiled here, but __file__ has to # be set properly. In Python 3.2+, this all would be handled correctly # by load_compiled. mod = sys.modules[name] = imp.new_module(name) try: mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. 
mod.__cached__ = pyc mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] def is_package(self, name): try: fd, fn, desc = imp.find_module(name) except ImportError: return False if fd is not None: fd.close() tp = desc[2] return tp == imp.PKG_DIRECTORY @classmethod def _register_with_pkg_resources(cls): """ Ensure package resources can be loaded from this loader. May be called multiple times, as the operation is idempotent. """ try: import pkg_resources # access an attribute in case a deferred importer is present pkg_resources.__name__ except ImportError: return # Since pytest tests are always located in the file system, the # DefaultProvider is appropriate. pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) def _write_pyc(state, co, source_path, pyc): # Technically, we don't have to have the same pyc format as # (C)Python, since these "pycs" should never be seen by builtin # import. However, there's little reason deviate, and I hope # sometime to be able to use imp.load_compiled to load them. (See # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) # we ignore any failure to write the cache file # there are many reasons, permission-denied, __pycache__ being a # file etc. 
def set_location(node, lineno, col_offset):
    """Recursively stamp *lineno*/*col_offset* onto *node* and all of its
    descendants, then return *node*."""
    todo = [node]
    while todo:
        current = todo.pop()
        # Only nodes that carry location attributes get them set.
        if "lineno" in current._attributes:
            current.lineno = lineno
        if "col_offset" in current._attributes:
            current.col_offset = col_offset
        todo.extend(ast.iter_child_nodes(current))
    return node
    def pop_format_context(self, expl_expr):
        """Finish the format context opened by push_format_context().

        Pops the current dict of explanation specifiers and, on the failure
        path, emits an assignment of the %%-formatted explanation::

            @py_formatN = expl_expr % {specifier: value, ...}

        Returns an ast.Name loading that variable.
        """
        current = self.stack.pop()
        if self.stack:
            # Restore the enclosing context's specifier dict.
            self.explanation_specifiers = self.stack[-1]
        keys = [ast.Str(key) for key in current.keys()]
        format_dict = ast.Dict(keys, list(current.values()))
        form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
        # "@" is invalid in Python identifiers, so this name cannot clash
        # with user variables.
        name = "@py_format" + str(next(self.variable_counter))
        self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
        return ast.Name(name, ast.Load())
    def visit_Assert(self, assert_):
        """Rewrite one ``assert`` statement into a list of statements that
        evaluate the test while recording intermediate values, and raise
        AssertionError with a detailed explanation if the test fails."""
        if assert_.msg:
            # There's already a message. Don't mess with it.
            return [assert_]
        # Per-assert state consumed by the visit_* helpers below.
        self.statements = []
        self.cond_chain = ()
        self.variables = []
        self.variable_counter = itertools.count()
        self.stack = []
        self.on_failure = []
        self.push_format_context()
        # Rewrite assert into a bunch of statements.
        top_condition, explanation = self.visit(assert_.test)
        # Create failure message.
        body = self.on_failure
        negation = ast.UnaryOp(ast.Not(), top_condition)
        self.statements.append(ast.If(negation, body, []))
        explanation = "assert " + explanation
        template = ast.Str(explanation)
        msg = self.pop_format_context(template)
        fmt = self.helper("format_explanation", msg)
        err_name = ast.Name("AssertionError", ast.Load())
        exc = ast.Call(err_name, [fmt], [], None, None)
        # ast.Raise has a different arity on Python 2 vs Python 3.
        if sys.version_info[0] >= 3:
            raise_ = ast.Raise(exc, None)
        else:
            raise_ = ast.Raise(exc, None, None)
        body.append(raise_)
        # Clear temporary variables by setting them to None.
        if self.variables:
            variables = [ast.Name(name, ast.Store())
                         for name in self.variables]
            clear = ast.Assign(variables, ast.Name("None", ast.Load()))
            self.statements.append(clear)
        # Fix line numbers.
        for stmt in self.statements:
            set_location(stmt, assert_.lineno, assert_.col_offset)
        return self.statements
    def visit_BoolOp(self, boolop):
        """Rewrite an ``and``/``or`` chain.

        Short-circuiting is preserved by nesting each subsequent operand's
        evaluation inside an ``if`` on the previous result (negated for
        ``or``).  Each operand's explanation is appended to a runtime list
        that _format_boolop later renders.
        """
        res_var = self.variable()
        expl_list = self.assign(ast.List([], ast.Load()))
        app = ast.Attribute(expl_list, "append", ast.Load())
        is_or = int(isinstance(boolop.op, ast.Or))
        body = save = self.statements
        fail_save = self.on_failure
        levels = len(boolop.values) - 1
        self.push_format_context()
        # Process each operand, short-circuting if needed.
        for i, v in enumerate(boolop.values):
            if i:
                fail_inner = []
                # cond is set in a prior loop iteration below
                self.on_failure.append(ast.If(cond, fail_inner, []))  # noqa
                self.on_failure = fail_inner
            self.push_format_context()
            res, expl = self.visit(v)
            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
            expl_format = self.pop_format_context(ast.Str(expl))
            call = ast.Call(app, [expl_format], [], None, None)
            self.on_failure.append(ast.Expr(call))
            if i < levels:
                cond = res
                if is_or:
                    # For "or", keep evaluating only while the result is falsy.
                    cond = ast.UnaryOp(ast.Not(), cond)
                inner = []
                self.statements.append(ast.If(cond, inner, []))
                self.statements = body = inner
        self.statements = save
        self.on_failure = fail_save
        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
        expl = self.pop_format_context(expl_template)
        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
self.assign(ast.BinOp(left_expr, binop.op, right_expr)) return res, explanation def visit_Call(self, call): new_func, func_expl = self.visit(call.func) arg_expls = [] new_args = [] new_kwargs = [] new_star = new_kwarg = None for arg in call.args: res, expl = self.visit(arg) new_args.append(res) arg_expls.append(expl) for keyword in call.keywords: res, expl = self.visit(keyword.value) new_kwargs.append(ast.keyword(keyword.arg, res)) arg_expls.append(keyword.arg + "=" + expl) if call.starargs: new_star, expl = self.visit(call.starargs) arg_expls.append("*" + expl) if call.kwargs: new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) return res, outer_expl def visit_Attribute(self, attr): if not isinstance(attr.ctx, ast.Load): return self.generic_visit(attr) value, value_expl = self.visit(attr.value) res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) res_expl = self.explanation_param(self.display(res)) pat = "%s\n{%s = %s.%s\n}" expl = pat % (res_expl, res_expl, value_expl, attr.attr) return res, expl def visit_Compare(self, comp): self.push_format_context() left_res, left_expl = self.visit(comp.left) res_variables = [self.variable() for i in range(len(comp.ops))] load_names = [ast.Name(v, ast.Load()) for v in res_variables] store_names = [ast.Name(v, ast.Store()) for v in res_variables] it = zip(range(len(comp.ops)), comp.ops, comp.comparators) expls = [] syms = [] results = [left_res] for i, op, next_operand in it: next_res, next_expl = self.visit(next_operand) results.append(next_res) sym = binop_map[op.__class__] syms.append(ast.Str(sym)) expl = "%s %s %s" % (left_expl, sym, next_expl) expls.append(ast.Str(expl)) res_expr = ast.Compare(left_res, [op], [next_res]) 
self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, left_expl = next_res, next_expl # Use pytest.assertion.util._reprcompare if that's available. expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), ast.Tuple(expls, ast.Load()), ast.Tuple(results, ast.Load())) if len(comp.ops) > 1: res = ast.BoolOp(ast.And(), load_names) else: res = load_names[0] return res, self.explanation_param(self.pop_format_context(expl_call)) pytest-2.5.1/_pytest/assertion/util.py0000664000175000017500000002314712254002202017460 0ustar hpkhpk00000000000000"""Utilities for assertion debugging""" import py try: from collections.abc import Sequence except ImportError: try: from collections import Sequence except ImportError: Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was # loaded and in turn call the hooks defined here as part of the # DebugInterpreter. _reprcompare = None def format_explanation(explanation): """This formats an explanation Normally all embedded newlines are escaped, however there are three exceptions: \n{, \n} and \n~. The first two are intended cover nested explanations, see function and attribute explanations for examples (.visit_Call(), visit_Attribute()). The last one is for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ explanation = _collapse_false(explanation) lines = _split_explanation(explanation) result = _format_lines(lines) return u('\n').join(result) def _collapse_false(explanation): """Collapse expansions of False So this strips out any "assert False\n{where False = ...\n}" blocks. 
""" where = 0 while True: start = where = explanation.find("False\n{False = ", where) if where == -1: break level = 0 for i, c in enumerate(explanation[start:]): if c == "{": level += 1 elif c == "}": level -= 1 if not level: break else: raise AssertionError("unbalanced braces: %r" % (explanation,)) end = start + i where = end if explanation[end - 1] == '\n': explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 return explanation def _split_explanation(explanation): """Return a list of individual lines in the explanation This will return a list of lines split on '\n{', '\n}' and '\n~'. Any other newlines will be escaped and appear in the line as the literal '\n' characters. """ raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l return lines def _format_lines(lines): """Format the individual lines This will replace the '{', '}' and '~' characters of our mini formatting language with the proper 'where ...', 'and ...' and ' + ...' text, taking care of indentation along the way. Return a list of formatted lines. 
""" result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: s = u('and ') else: s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() stackcnt.pop() result[stack[-1]] += line[1:] else: assert line.startswith('~') result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 return result # Provide basestring in python3 try: basestring = basestring except NameError: basestring = str def assertrepr_compare(config, op, left, right): """Return specialised explanations for some operators/operands""" width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) summary = u('%s %s %s') % (left_repr, op, right_repr) issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and not isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) isset = lambda x: isinstance(x, (set, frozenset)) verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): explanation = _notin_text(left, right, verbose) except Exception: excinfo = py.code.ExceptionInfo() explanation = [ u('(pytest_assertion plugin: representation of details failed. 
def _diff_text(left, right, verbose=False):
    """Return the explanation for the diff between text

    Unless --verbose is used this will skip leading and trailing
    characters which are identical to keep the diff minimal.
    """
    explanation = []
    if not verbose:
        i = 0  # just in case left or right has zero length
        for i in range(min(len(left), len(right))):
            if left[i] != right[i]:
                break
        if i > 42:
            i -= 10  # Provide some context
            explanation = [u('Skipping %s identical leading '
                             'characters in diff, use -v to show') % i]
            left = left[i:]
            right = right[i:]
        if len(left) == len(right):
            for i in range(len(left)):
                # Compare from the end.  Note: plain -i would address the
                # *first* character when i == 0 (since -0 == 0), which used
                # to break the trailing trim entirely; hence -i - 1.
                if left[-i - 1] != right[-i - 1]:
                    break
            if i > 42:
                i -= 10  # Provide some context
                explanation += [u('Skipping %s identical trailing '
                                  'characters in diff, use -v to show') % i]
                left = left[:-i]
                right = right[:-i]
    explanation += [line.strip('\n')
                    for line in py.std.difflib.ndiff(left.splitlines(),
                                                     right.splitlines())]
    return explanation
def _compare_eq_dict(left, right, verbose=False):
    """Explain an ``==`` comparison between two dicts: identical items are
    summarized (or listed with -v), then differing values, then keys that
    exist on only one side."""
    lines = []
    common = set(left).intersection(set(right))
    same = dict((k, left[k]) for k in common if left[k] == right[k])
    if same:
        if verbose:
            lines.append(u('Common items:'))
            lines.extend(py.std.pprint.pformat(same).splitlines())
        else:
            lines.append(u('Omitting %s identical items, use -v to show') %
                         len(same))
    differing = set(k for k in common if left[k] != right[k])
    if differing:
        lines.append(u('Differing items:'))
        for k in differing:
            lines.append(py.io.saferepr({k: left[k]}) + ' != ' +
                         py.io.saferepr({k: right[k]}))
    only_left = set(left) - set(right)
    if only_left:
        lines.append(u('Left contains more items:'))
        lines.extend(py.std.pprint.pformat(
            dict((k, left[k]) for k in only_left)).splitlines())
    only_right = set(right) - set(left)
    if only_right:
        lines.append(u('Right contains more items:'))
        lines.extend(py.std.pprint.pformat(
            dict((k, right[k]) for k in only_right)).splitlines())
    return lines
""" import py import sys from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util def pytest_addoption(parser): group = parser.getgroup("debugconfig") group.addoption('--assert', action="store", dest="assertmode", choices=("rewrite", "reinterp", "plain",), default="rewrite", metavar="MODE", help="""control assertion debugging tools. 'plain' performs no assertion debugging. 'reinterp' reinterprets assert statements after they failed to provide assertion expression information. 'rewrite' (the default) rewrites assert statements in test modules on import to provide assert expression information. """) group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") group.addoption('--nomagic', '--no-magic', action="store_true", default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" def __init__(self, config, mode): self.mode = mode self.trace = config.trace.root.get("assertion") def pytest_configure(config): mode = config.getvalue("assertmode") if config.getvalue("noassert") or config.getvalue("nomagic"): mode = "plain" if mode == "rewrite": try: import ast # noqa except ImportError: mode = "reinterp" else: # Both Jython and CPython 2.6.0 have AST bugs that make the # assertion rewriting hook malfunction. 
def pytest_collection(session):
    """Attach the session to the rewriting hook once collection starts.

    This hook only runs in processes that collect test modules, so e.g.
    the master process of pytest-xdist (which does not collect) never
    reaches it.
    """
    state = session.config._assertstate
    if state.hook is not None:
        state.hook.set_session(session)
def warn_about_missing_assertion(mode):
    """Warn on stderr when the interpreter does not execute asserts.

    Under ``python -O`` assert statements are stripped, so none of the
    assertion machinery can do anything useful.
    """
    asserts_enabled = False
    try:
        assert False
    except AssertionError:
        asserts_enabled = True
    if asserts_enabled:
        return
    if mode == "rewrite":
        specifically = ("assertions which are not in test modules "
                        "will be ignored")
    else:
        specifically = "failing tests may report as passing"

    sys.stderr.write("WARNING: " + specifically +
                     " because assert statements are not executed "
                     "by the underlying Python interpreter "
                     "(are you using python -O?)\n")
    def _selectsubclass(self, key):
        """Pick the concrete view class for *key*.

        Returns the most specific subclass of __rootclass__ whose __view__
        matches *key* (per __matchkey__); falls back to __rootclass__ when
        nothing matches, and synthesizes a combined type when several
        subclasses match.
        """
        subclasses = list(enumsubclasses(self.__rootclass__))
        for C in subclasses:
            # Normalize __view__ to a tuple so __matchkey__ can use `in`.
            if not isinstance(C.__view__, tuple):
                C.__view__ = (C.__view__,)
        choices = self.__matchkey__(key, subclasses)
        if not choices:
            return self.__rootclass__
        elif len(choices) == 1:
            return choices[0]
        else:
            # combine the multiple choices
            return type('?', tuple(choices), {})
class Interpretable(View):
    """A parse tree node with a few extra methods."""
    # Human-readable explanation built up during eval()/run(); None until set.
    explanation = None

    def is_builtin(self, frame):
        # Overridden by Name; everything else is assumed non-builtin.
        return False

    def eval(self, frame):
        # fall-back for unknown expression nodes
        try:
            expr = ast.Expression(self.__obj__)
            # NOTE(review): these filename literals look truncated in this
            # copy (upstream uses '<eval>'); verify against the original.
            expr.filename = ''
            self.__obj__.filename = ''
            co = pycodegen.ExpressionCodeGenerator(expr).getCode()
            result = frame.eval(co)
        except passthroughex:
            raise
        except:
            raise Failure(self)
        self.result = result
        self.explanation = self.explanation or frame.repr(self.result)

    def run(self, frame):
        # fall-back for unknown statement nodes
        try:
            expr = ast.Module(None, ast.Stmt([self.__obj__]))
            # NOTE(review): likely '<run>' upstream; see note in eval().
            expr.filename = ''
            co = pycodegen.ModuleCodeGenerator(expr).getCode()
            frame.exec_(co)
        except passthroughex:
            raise
        except:
            raise Failure(self)

    def nice_explanation(self):
        return format_explanation(self.explanation)
class Or(Interpretable):
    """Re-interpretation of an ``or`` chain; evaluates operands left to
    right and short-circuits on the first true result."""
    __view__ = ast.Or

    def eval(self, frame):
        explanations = []
        for expr in self.nodes:
            expr = Interpretable(expr)
            expr.eval(frame)
            explanations.append(expr.explanation)
            # The chain's result is the last operand evaluated.
            self.result = expr.result
            if frame.is_true(expr.result):
                break
        self.explanation = '(' + ' or '.join(explanations) + ')'
class CallFunc(Interpretable):
    """Re-interpretation of a function call: evaluates the callee and each
    argument, then re-executes the call with the captured values so the
    explanation can show intermediate results."""
    __view__ = ast.CallFunc

    def is_bool(self, frame):
        # True when the call's result is a bool in the target frame.
        source = 'isinstance(__exprinfo_value, bool)'
        try:
            return frame.is_true(frame.eval(source,
                                            __exprinfo_value=self.result))
        except passthroughex:
            raise
        except:
            return False

    def eval(self, frame):
        node = Interpretable(self.node)
        node.eval(frame)
        explanations = []
        # Rebuild the call as source text over __exprinfo_* placeholders
        # bound to the already-evaluated values.
        vars = {'__exprinfo_fn': node.result}
        source = '__exprinfo_fn('
        for a in self.args:
            if isinstance(a, ast.Keyword):
                keyword = a.name
                a = a.expr
            else:
                keyword = None
            a = Interpretable(a)
            a.eval(frame)
            argname = '__exprinfo_%d' % len(vars)
            vars[argname] = a.result
            if keyword is None:
                source += argname + ','
                explanations.append(a.explanation)
            else:
                source += '%s=%s,' % (keyword, argname)
                explanations.append('%s=%s' % (keyword, a.explanation))
        if self.star_args:
            star_args = Interpretable(self.star_args)
            star_args.eval(frame)
            argname = '__exprinfo_star'
            vars[argname] = star_args.result
            source += '*' + argname + ','
            explanations.append('*' + star_args.explanation)
        if self.dstar_args:
            dstar_args = Interpretable(self.dstar_args)
            dstar_args.eval(frame)
            argname = '__exprinfo_kwds'
            vars[argname] = dstar_args.result
            source += '**' + argname + ','
            explanations.append('**' + dstar_args.explanation)
        self.explanation = "%s(%s)" % (
            node.explanation, ', '.join(explanations))
        # Drop the trailing comma before closing the call.
        if source.endswith(','):
            source = source[:-1]
        source += ')'
        try:
            self.result = frame.eval(source, **vars)
        except passthroughex:
            raise
        except:
            raise Failure(self)
        # Builtin predicates returning bool need no expanded repr.
        if not node.is_builtin(frame) or not self.is_bool(frame):
            r = frame.repr(self.result)
            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
class Assert(Interpretable):
    """Re-interpretation of an ``assert`` statement: evaluates the test
    and raises a Failure carrying the explanation when it is false."""
    __view__ = ast.Assert

    def run(self, frame):
        test = Interpretable(self.test)
        test.eval(frame)
        # print the result as 'assert ' plus the test's explanation
        self.result = test.result
        self.explanation = 'assert ' + test.explanation
        if not frame.is_true(test.result):
            try:
                raise BuiltinAssertionError
            except passthroughex:
                raise
            except:
                # Wrap so callers get the node and explanation, not a bare
                # AssertionError.
                raise Failure(self)
= ' + expr.explanation # fall-back-run the rest of the assignment ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr')) mod = ast.Module(None, ast.Stmt([ass])) mod.filename = '' co = pycodegen.ModuleCodeGenerator(mod).getCode() try: frame.exec_(co, __exprinfo_expr=expr.result) except passthroughex: raise except: raise Failure(self) class Discard(Interpretable): __view__ = ast.Discard def run(self, frame): expr = Interpretable(self.expr) expr.eval(frame) self.result = expr.result self.explanation = expr.explanation class Stmt(Interpretable): __view__ = ast.Stmt def run(self, frame): for stmt in self.nodes: stmt = Interpretable(stmt) stmt.run(frame) def report_failure(e): explanation = e.node.nice_explanation() if explanation: explanation = ", in: " + explanation else: explanation = "" sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation)) def check(s, frame=None): if frame is None: frame = sys._getframe(1) frame = py.code.Frame(frame) expr = parse(s, 'eval') assert isinstance(expr, ast.Expression) node = Interpretable(expr.node) try: node.eval(frame) except passthroughex: raise except Failure: e = sys.exc_info()[1] report_failure(e) else: if not frame.is_true(node.result): sys.stderr.write("assertion failed: %s\n" % node.nice_explanation()) ########################################################### # API / Entry points # ######################################################### def interpret(source, frame, should_fail=False): module = Interpretable(parse(source, 'exec').node) #print "got module", module if isinstance(frame, py.std.types.FrameType): frame = py.code.Frame(frame) try: module.run(frame) except Failure: e = sys.exc_info()[1] return getfailure(e) except passthroughex: raise except: import traceback traceback.print_exc() if should_fail: return ("(assertion failed, but when it was re-run for " "printing intermediate values, it did not fail. 
Suggestions: " "compute assert expression before the assert or use --assert=plain)") else: return None def getmsg(excinfo): if isinstance(excinfo, tuple): excinfo = py.code.ExceptionInfo(excinfo) #frame, line = gettbline(tb) #frame = py.code.Frame(frame) #return interpret(line, frame) tb = excinfo.traceback[-1] source = str(tb.statement).strip() x = interpret(source, tb.frame, should_fail=True) if not isinstance(x, str): raise TypeError("interpret returned non-string %r" % (x,)) return x def getfailure(e): explanation = e.node.nice_explanation() if str(e.value): lines = explanation.split('\n') lines[0] += " << %s" % (e.value,) explanation = '\n'.join(lines) text = "%s: %s" % (e.exc.__name__, explanation) if text.startswith('AssertionError: assert '): text = text[16:] return text def run(s, frame=None): if frame is None: frame = sys._getframe(1) frame = py.code.Frame(frame) module = Interpretable(parse(s, 'exec').node) try: module.run(frame) except Failure: e = sys.exc_info()[1] report_failure(e) if __name__ == '__main__': # example: def f(): return 5 def g(): return 3 def h(x): return 'never' check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") check("f() == g()") i = 4 check("i == f()") check("len(f()) == 0") check("isinstance(2+3+4, float)") run("x = i") check("x == 5") run("assert not f(), 'oops'") run("a, b, c = 1, 2") run("a, b, c = f()") check("max([f(),g()]) == 4") check("'hello'[g()] == 'h'") run("'guk%d' % h(f())") pytest-2.5.1/_pytest/assertion/reinterpret.py0000664000175000017500000000376112254002202021046 0ustar hpkhpk00000000000000import sys import py from _pytest.assertion.util import BuiltinAssertionError u = py.builtin._totext class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: # on Python2.6 we get len(args)==2 for: assert 0, (x,y) # on Python2.7 and above we always get len(args) == 1 # with args[0] being the (x,y) tuple. 
if len(args) > 1: toprint = args else: toprint = args[0] try: self.msg = u(toprint) except Exception: self.msg = u( "<[broken __repr__] %s at %0xd>" % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: source = f.code.fullsource if source is not None: try: source = source.getstatement(f.lineno, assertion=True) except IndexError: source = None else: source = str(source.deindent()).strip() except py.error.ENOENT: source = None # this can also occur during reinterpretation, when the # co_filename is set to "". if source: self.msg = reinterpret(source, f, should_fail=True) else: self.msg = "" if not self.args: self.args = (self.msg,) if sys.version_info > (3, 0): AssertionError.__module__ = "builtins" reinterpret_old = "old reinterpretation not available for py3" else: from _pytest.assertion.oldinterpret import interpret as reinterpret_old if sys.version_info >= (2, 6) or (sys.platform.startswith("java")): from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old pytest-2.5.1/_pytest/junitxml.py0000664000175000017500000001716312254002202016347 0ustar hpkhpk00000000000000""" report test results in JUnit-XML format, for use with Hudson and build integration servers. Based on initial code from Ross Lawley. """ import py import os import re import sys import time # Python 2.X and 3.X compatibility try: unichr(65) except NameError: unichr = chr try: unicode('A') except NameError: unicode = str try: long(1) except NameError: long = int class Junit(py.xml.Namespace): pass # We need to get the subset of the invalid unicode ranges according to # XML 1.0 which are valid in this python build. Hence we calculate # this dynamically instead of hardcoding it. 
The spec range of valid # chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] # | [#x10000-#x10FFFF] _legal_chars = (0x09, 0x0A, 0x0d) _legal_ranges = ( (0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF), ) _legal_xml_re = [unicode("%s-%s") % (unichr(low), unichr(high)) for (low, high) in _legal_ranges if low < sys.maxunicode] _legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re)) del _legal_chars del _legal_ranges del _legal_xml_re def bin_xml_escape(arg): def repl(matchobj): i = ord(matchobj.group()) if i <= 0xFF: return unicode('#x%02X') % i else: return unicode('#x%04X') % i return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg))) def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group.addoption('--junitxml', '--junit-xml', action="store", dest="xmlpath", metavar="path", default=None, help="create junit-xml style report file at given path.") group.addoption('--junitprefix', '--junit-prefix', action="store", metavar="str", default=None, help="prepend prefix to classnames in junit-xml output") def pytest_configure(config): xmlpath = config.option.xmlpath # prevent opening xmllog on slave nodes (xdist) if xmlpath and not hasattr(config, 'slaveinput'): config._xml = LogXML(xmlpath, config.option.junitprefix) config.pluginmanager.register(config._xml) def pytest_unconfigure(config): xml = getattr(config, '_xml', None) if xml: del config._xml config.pluginmanager.unregister(xml) def mangle_testnames(names): names = [x.replace(".py", "") for x in names if x != '()'] names[0] = names[0].replace("/", '.') return names class LogXML(object): def __init__(self, logfile, prefix): logfile = os.path.expanduser(os.path.expandvars(logfile)) self.logfile = os.path.normpath(os.path.abspath(logfile)) self.prefix = prefix self.tests = [] self.passed = self.skipped = 0 self.failed = self.errors = 0 def _opentestcase(self, 
report): names = mangle_testnames(report.nodeid.split("::")) classnames = names[:-1] if self.prefix: classnames.insert(0, self.prefix) self.tests.append(Junit.testcase( classname=".".join(classnames), name=bin_xml_escape(names[-1]), time=getattr(report, 'duration', 0) )) def _write_captured_output(self, report): sec = dict(report.sections) for name in ('out', 'err'): content = sec.get("Captured std%s" % name) if content: tag = getattr(Junit, 'system-'+name) self.append(tag(bin_xml_escape(content))) def append(self, obj): self.tests[-1].append(obj) def append_pass(self, report): self.passed += 1 self._write_captured_output(report) def append_failure(self, report): #msg = str(report.longrepr.reprtraceback.extraline) if hasattr(report, "wasxfail"): self.append( Junit.skipped(message="xfail-marked test passes unexpectedly")) self.skipped += 1 else: fail = Junit.failure(message="test failure") fail.append(bin_xml_escape(report.longrepr)) self.append(fail) self.failed += 1 self._write_captured_output(report) def append_collect_failure(self, report): #msg = str(report.longrepr.reprtraceback.extraline) self.append(Junit.failure(bin_xml_escape(report.longrepr), message="collection failure")) self.errors += 1 def append_collect_skipped(self, report): #msg = str(report.longrepr.reprtraceback.extraline) self.append(Junit.skipped(bin_xml_escape(report.longrepr), message="collection skipped")) self.skipped += 1 def append_error(self, report): self.append(Junit.error(bin_xml_escape(report.longrepr), message="test setup failure")) self.errors += 1 def append_skipped(self, report): if hasattr(report, "wasxfail"): self.append(Junit.skipped(bin_xml_escape(report.wasxfail), message="expected test failure")) else: filename, lineno, skipreason = report.longrepr if skipreason.startswith("Skipped: "): skipreason = bin_xml_escape(skipreason[9:]) self.append( Junit.skipped("%s:%s: %s" % report.longrepr, type="pytest.skip", message=skipreason )) self.skipped += 1 
self._write_captured_output(report) def pytest_runtest_logreport(self, report): if report.passed: if report.when == "call": # ignore setup/teardown self._opentestcase(report) self.append_pass(report) elif report.failed: self._opentestcase(report) if report.when != "call": self.append_error(report) else: self.append_failure(report) elif report.skipped: self._opentestcase(report) self.append_skipped(report) def pytest_collectreport(self, report): if not report.passed: self._opentestcase(report) if report.failed: self.append_collect_failure(report) else: self.append_collect_skipped(report) def pytest_internalerror(self, excrepr): self.errors += 1 data = bin_xml_escape(excrepr) self.tests.append( Junit.testcase( Junit.error(data, message="internal error"), classname="pytest", name="internal")) def pytest_sessionstart(self): self.suite_start_time = time.time() def pytest_sessionfinish(self): if py.std.sys.version_info[0] < 3: logfile = py.std.codecs.open(self.logfile, 'w', encoding='utf-8') else: logfile = open(self.logfile, 'w', encoding='utf-8') suite_stop_time = time.time() suite_time_delta = suite_stop_time - self.suite_start_time numtests = self.passed + self.failed logfile.write('') logfile.write(Junit.testsuite( self.tests, name="pytest", errors=self.errors, failures=self.failed, skips=self.skipped, tests=numtests, time="%.3f" % suite_time_delta, ).unicode(indent=0)) logfile.close() def pytest_terminal_summary(self, terminalreporter): terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile)) pytest-2.5.1/_pytest/terminal.py0000664000175000017500000004413012254002202016302 0ustar hpkhpk00000000000000""" terminal reporting of the full testing process. This is a good source for looking at the various reporting hooks. 
""" import pytest import py import sys def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "reporting", after="general") group._addoption('-v', '--verbose', action="count", dest="verbose", default=0, help="increase verbosity."), group._addoption('-q', '--quiet', action="count", dest="quiet", default=0, help="decrease verbosity."), group._addoption('-r', action="store", dest="reportchars", default=None, metavar="chars", help="show extra test summary info as specified by chars (f)ailed, " "(E)error, (s)skipped, (x)failed, (X)passed.") group._addoption('-l', '--showlocals', action="store_true", dest="showlocals", default=False, help="show locals in tracebacks (disabled by default).") group._addoption('--report', action="store", dest="report", default=None, metavar="opts", help="(deprecated, use -r)") group._addoption('--tb', metavar="style", action="store", dest="tbstyle", default='long', choices=['long', 'short', 'no', 'line', 'native'], help="traceback print mode (long/short/line/native/no).") group._addoption('--fulltrace', '--full-trace', action="store_true", default=False, help="don't cut any tracebacks (default is to cut).") group._addoption('--color', metavar="color", action="store", dest="color", default='auto', choices=['yes', 'no', 'auto'], help="color terminal output (yes/no/auto).") def pytest_configure(config): config.option.verbose -= config.option.quiet reporter = TerminalReporter(config, sys.stdout) config.pluginmanager.register(reporter, 'terminalreporter') if config.option.debug or config.option.traceconfig: def mywriter(tags, args): msg = " ".join(map(str, args)) reporter.write_line("[traceconfig] " + msg) config.trace.root.setprocessor("pytest:config", mywriter) def getreportopt(config): reportopts = "" optvalue = config.option.report if optvalue: py.builtin.print_("DEPRECATED: use -r instead of --report option.", file=py.std.sys.stderr) if optvalue: for setting in optvalue.split(","): setting = setting.strip() if setting == 
"skipped": reportopts += "s" elif setting == "xfailed": reportopts += "x" reportchars = config.option.reportchars if reportchars: for char in reportchars: if char not in reportopts: reportopts += char return reportopts def pytest_report_teststatus(report): if report.passed: letter = "." elif report.skipped: letter = "s" elif report.failed: letter = "F" if report.when != "call": letter = "f" return report.outcome, letter, report.outcome.upper() class TerminalReporter: def __init__(self, config, file=None): self.config = config self.verbosity = self.config.option.verbose self.showheader = self.verbosity >= 0 self.showfspath = self.verbosity >= 0 self.showlongtestinfo = self.verbosity > 0 self._numcollected = 0 self.stats = {} self.startdir = self.curdir = py.path.local() if file is None: file = py.std.sys.stdout self._tw = self.writer = py.io.TerminalWriter(file) if self.config.option.color == 'yes': self._tw.hasmarkup = True if self.config.option.color == 'no': self._tw.hasmarkup = False self.currentfspath = None self.reportchars = getreportopt(config) self.hasmarkup = self._tw.hasmarkup def hasopt(self, char): char = {'xfailed': 'x', 'skipped': 's'}.get(char, char) return char in self.reportchars def write_fspath_result(self, fspath, res): if fspath != self.currentfspath: self.currentfspath = fspath #fspath = self.startdir.bestrelpath(fspath) self._tw.line() #relpath = self.startdir.bestrelpath(fspath) self._tw.write(fspath + " ") self._tw.write(res) def write_ensure_prefix(self, prefix, extra="", **kwargs): if self.currentfspath != prefix: self._tw.line() self.currentfspath = prefix self._tw.write(prefix) if extra: self._tw.write(extra, **kwargs) self.currentfspath = -2 def ensure_newline(self): if self.currentfspath: self._tw.line() self.currentfspath = None def write(self, content, **markup): self._tw.write(content, **markup) def write_line(self, line, **markup): line = str(line) self.ensure_newline() self._tw.line(line, **markup) def rewrite(self, line, 
**markup): line = str(line) self._tw.write("\r" + line, **markup) def write_sep(self, sep, title=None, **markup): self.ensure_newline() self._tw.sep(sep, title, **markup) def section(self, title, sep="=", **kw): self._tw.sep(sep, title, **kw) def line(self, msg, **kw): self._tw.line(msg, **kw) def pytest_internalerror(self, excrepr): for line in str(excrepr).split("\n"): self.write_line("INTERNALERROR> " + line) return 1 def pytest_plugin_registered(self, plugin): if self.config.option.traceconfig: msg = "PLUGIN registered: %s" % (plugin,) # XXX this event may happen during setup/teardown time # which unfortunately captures our output here # which garbles our output if we use self.write_line self.write_line(msg) def pytest_deselected(self, items): self.stats.setdefault('deselected', []).extend(items) def pytest_runtest_logstart(self, nodeid, location): # ensure that the path is printed before the # 1st test of a module starts running fspath = nodeid.split("::")[0] if self.showlongtestinfo: line = self._locationline(fspath, *location) self.write_ensure_prefix(line, "") elif self.showfspath: self.write_fspath_result(fspath, "") def pytest_runtest_logreport(self, report): rep = report res = self.config.hook.pytest_report_teststatus(report=rep) cat, letter, word = res self.stats.setdefault(cat, []).append(rep) self._tests_ran = True if not letter and not word: # probably passed setup/teardown return if self.verbosity <= 0: if not hasattr(rep, 'node') and self.showfspath: self.write_fspath_result(rep.fspath, letter) else: self._tw.write(letter) else: if isinstance(word, tuple): word, markup = word else: if rep.passed: markup = {'green':True} elif rep.failed: markup = {'red':True} elif rep.skipped: markup = {'yellow':True} line = self._locationline(str(rep.fspath), *rep.location) if not hasattr(rep, 'node'): self.write_ensure_prefix(line, word, **markup) #self._tw.write(word, **markup) else: self.ensure_newline() if hasattr(rep, 'node'): self._tw.write("[%s] " % 
rep.node.gateway.id) self._tw.write(word, **markup) self._tw.write(" " + line) self.currentfspath = -2 def pytest_collection(self): if not self.hasmarkup and self.config.option.verbose >= 1: self.write("collecting ... ", bold=True) def pytest_collectreport(self, report): if report.failed: self.stats.setdefault("error", []).append(report) elif report.skipped: self.stats.setdefault("skipped", []).append(report) items = [x for x in report.result if isinstance(x, pytest.Item)] self._numcollected += len(items) if self.hasmarkup: #self.write_fspath_result(report.fspath, 'E') self.report_collect() def report_collect(self, final=False): if self.config.option.verbose < 0: return errors = len(self.stats.get('error', [])) skipped = len(self.stats.get('skipped', [])) if final: line = "collected " else: line = "collecting " line += str(self._numcollected) + " items" if errors: line += " / %d errors" % errors if skipped: line += " / %d skipped" % skipped if self.hasmarkup: if final: line += " \n" self.rewrite(line, bold=True) else: self.write_line(line) def pytest_collection_modifyitems(self): self.report_collect(True) @pytest.mark.trylast def pytest_sessionstart(self, session): self._sessionstarttime = py.std.time.time() if not self.showheader: return self.write_sep("=", "test session starts", bold=True) verinfo = ".".join(map(str, sys.version_info[:3])) msg = "platform %s -- Python %s" % (sys.platform, verinfo) if hasattr(sys, 'pypy_version_info'): verinfo = ".".join(map(str, sys.pypy_version_info[:3])) msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3]) msg += " -- pytest-%s" % (py.test.__version__) if self.verbosity > 0 or self.config.option.debug or \ getattr(self.config.option, 'pastebin', None): msg += " -- " + str(sys.executable) self.write_line(msg) lines = self.config.hook.pytest_report_header( config=self.config, startdir=self.startdir) lines.reverse() for line in flatten(lines): self.write_line(line) def pytest_report_header(self, config): plugininfo = 
config.pluginmanager._plugin_distinfo if plugininfo: l = [] for dist, plugin in plugininfo: name = dist.project_name if name.startswith("pytest-"): name = name[7:] l.append(name) return "plugins: %s" % ", ".join(l) def pytest_collection_finish(self, session): if self.config.option.collectonly: self._printcollecteditems(session.items) if self.stats.get('failed'): self._tw.sep("!", "collection failures") for rep in self.stats.get('failed'): rep.toterminal(self._tw) return 1 return 0 if not self.showheader: return #for i, testarg in enumerate(self.config.args): # self.write_line("test path %d: %s" %(i+1, testarg)) def _printcollecteditems(self, items): # to print out items and their parent collectors # we take care to leave out Instances aka () # because later versions are going to get rid of them anyway if self.config.option.verbose < 0: if self.config.option.verbose < -1: counts = {} for item in items: name = item.nodeid.split('::', 1)[0] counts[name] = counts.get(name, 0) + 1 for name, count in sorted(counts.items()): self._tw.line("%s: %d" % (name, count)) else: for item in items: nodeid = item.nodeid nodeid = nodeid.replace("::()::", "::") self._tw.line(nodeid) return stack = [] indent = "" for item in items: needed_collectors = item.listchain()[1:] # strip root node while stack: if stack == needed_collectors[:len(stack)]: break stack.pop() for col in needed_collectors[len(stack):]: stack.append(col) #if col.name == "()": # continue indent = (len(stack) - 1) * " " self._tw.line("%s%s" % (indent, col)) def pytest_sessionfinish(self, exitstatus, __multicall__): __multicall__.execute() self._tw.line("") if exitstatus in (0, 1, 2, 4): self.summary_errors() self.summary_failures() self.summary_hints() self.config.hook.pytest_terminal_summary(terminalreporter=self) if exitstatus == 2: self._report_keyboardinterrupt() del self._keyboardinterrupt_memo self.summary_deselected() self.summary_stats() def pytest_keyboard_interrupt(self, excinfo): self._keyboardinterrupt_memo 
= excinfo.getrepr(funcargs=True) def pytest_unconfigure(self): if hasattr(self, '_keyboardinterrupt_memo'): self._report_keyboardinterrupt() def _report_keyboardinterrupt(self): excrepr = self._keyboardinterrupt_memo msg = excrepr.reprcrash.message self.write_sep("!", msg) if "KeyboardInterrupt" in msg: if self.config.option.fulltrace: excrepr.toterminal(self._tw) else: excrepr.reprcrash.toterminal(self._tw) def _locationline(self, collect_fspath, fspath, lineno, domain): # collect_fspath comes from testid which has a "/"-normalized path if fspath and fspath.replace("\\", "/") != collect_fspath: fspath = "%s <- %s" % (collect_fspath, fspath) if fspath: line = str(fspath) if lineno is not None: lineno += 1 line += ":" + str(lineno) if domain: line += ": " + str(domain) else: line = "[location]" return line + " " def _getfailureheadline(self, rep): if hasattr(rep, 'location'): fspath, lineno, domain = rep.location return domain else: return "test session" # XXX? def _getcrashline(self, rep): try: return str(rep.longrepr.reprcrash) except AttributeError: try: return str(rep.longrepr)[:50] except AttributeError: return "" # # summaries for sessionfinish # def getreports(self, name): l = [] for x in self.stats.get(name, []): if not hasattr(x, '_pdbshown'): l.append(x) return l def summary_hints(self): if self.config.option.traceconfig: for hint in self.config.pluginmanager._hints: self._tw.line("hint: %s" % hint) def summary_failures(self): if self.config.option.tbstyle != "no": reports = self.getreports('failed') if not reports: return self.write_sep("=", "FAILURES") for rep in reports: if self.config.option.tbstyle == "line": line = self._getcrashline(rep) self.write_line(line) else: msg = self._getfailureheadline(rep) self.write_sep("_", msg) self._outrep_summary(rep) def summary_errors(self): if self.config.option.tbstyle != "no": reports = self.getreports('error') if not reports: return self.write_sep("=", "ERRORS") for rep in self.stats['error']: msg = 
self._getfailureheadline(rep) if not hasattr(rep, 'when'): # collect msg = "ERROR collecting " + msg elif rep.when == "setup": msg = "ERROR at setup of " + msg elif rep.when == "teardown": msg = "ERROR at teardown of " + msg self.write_sep("_", msg) self._outrep_summary(rep) def _outrep_summary(self, rep): rep.toterminal(self._tw) for secname, content in rep.sections: self._tw.sep("-", secname) if content[-1:] == "\n": content = content[:-1] self._tw.line(content) def summary_stats(self): session_duration = py.std.time.time() - self._sessionstarttime keys = "failed passed skipped deselected xfailed xpassed".split() for key in self.stats.keys(): if key not in keys: keys.append(key) parts = [] for key in keys: if key: # setup/teardown reports have an empty key, ignore them val = self.stats.get(key, None) if val: parts.append("%d %s" % (len(val), key)) line = ", ".join(parts) msg = "%s in %.2f seconds" % (line, session_duration) markup = {'bold': True} if 'failed' in self.stats or 'error' in self.stats: markup = {'red': True, 'bold': True} else: markup = {'green': True, 'bold': True} if self.verbosity >= 0: self.write_sep("=", msg, **markup) if self.verbosity == -1: self.write_line(msg, **markup) def summary_deselected(self): if 'deselected' in self.stats: l = [] k = self.config.option.keyword if k: l.append("-k%s" % k) m = self.config.option.markexpr if m: l.append("-m %r" % m) if l: self.write_sep("=", "%d tests deselected by %r" % ( len(self.stats['deselected']), " ".join(l)), bold=True) def repr_pythonversion(v=None): if v is None: v = sys.version_info try: return "%s.%s.%s-%s-%s" % v except (TypeError, ValueError): return str(v) def flatten(l): for x in l: if isinstance(x, (list, tuple)): for y in flatten(x): yield y else: yield x pytest-2.5.1/_pytest/doctest.py0000664000175000017500000001222012254002202016127 0ustar hpkhpk00000000000000""" discover and run doctests in modules and test files.""" import pytest, py from _pytest.python import FixtureRequest, 
FuncFixtureInfo from py._code.code import TerminalRepr, ReprFileLocation def pytest_addoption(parser): group = parser.getgroup("collect") group.addoption("--doctest-modules", action="store_true", default=False, help="run doctests in all .py modules", dest="doctestmodules") group.addoption("--doctest-glob", action="store", default="test*.txt", metavar="pat", help="doctests file matching pattern, default: test*.txt", dest="doctestglob") def pytest_collect_file(path, parent): config = parent.config if path.ext == ".py": if config.option.doctestmodules: return DoctestModule(path, parent) elif (path.ext in ('.txt', '.rst') and parent.session.isinitpath(path)) or \ path.check(fnmatch=config.getvalue("doctestglob")): return DoctestTextfile(path, parent) class ReprFailDoctest(TerminalRepr): def __init__(self, reprlocation, lines): self.reprlocation = reprlocation self.lines = lines def toterminal(self, tw): for line in self.lines: tw.line(line) self.reprlocation.toterminal(tw) class DoctestItem(pytest.Item): def __init__(self, name, parent, runner=None, dtest=None): super(DoctestItem, self).__init__(name, parent) self.runner = runner self.dtest = dtest def runtest(self): self.runner.run(self.dtest) def repr_failure(self, excinfo): doctest = py.std.doctest if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)): doctestfailure = excinfo.value example = doctestfailure.example test = doctestfailure.test filename = test.filename if test.lineno is None: lineno = None else: lineno = test.lineno + example.lineno + 1 message = excinfo.type.__name__ reprlocation = ReprFileLocation(filename, lineno, message) checker = py.std.doctest.OutputChecker() REPORT_UDIFF = py.std.doctest.REPORT_UDIFF filelines = py.path.local(filename).readlines(cr=0) lines = [] if lineno is not None: i = max(test.lineno, max(0, lineno - 10)) # XXX? 
for line in filelines[i:lineno]: lines.append("%03d %s" % (i+1, line)) i += 1 else: lines.append('EXAMPLE LOCATION UNKNOWN, not showing all tests of that example') indent = '>>>' for line in example.source.splitlines(): lines.append('??? %s %s' % (indent, line)) indent = '...' if excinfo.errisinstance(doctest.DocTestFailure): lines += checker.output_difference(example, doctestfailure.got, REPORT_UDIFF).split("\n") else: inner_excinfo = py.code.ExceptionInfo(excinfo.value.exc_info) lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] lines += py.std.traceback.format_exception(*excinfo.value.exc_info) return ReprFailDoctest(reprlocation, lines) else: return super(DoctestItem, self).repr_failure(excinfo) def reportinfo(self): return self.fspath, None, "[doctest] %s" % self.name class DoctestTextfile(DoctestItem, pytest.File): def runtest(self): doctest = py.std.doctest # satisfy `FixtureRequest` constructor... self.funcargs = {} fm = self.session._fixturemanager def func(): pass self._fixtureinfo = fm.getfixtureinfo(node=self, func=func, cls=None, funcargs=False) fixture_request = FixtureRequest(self) fixture_request._fillfixtures() failed, tot = doctest.testfile( str(self.fspath), module_relative=False, optionflags=doctest.ELLIPSIS, extraglobs=dict(getfixture=fixture_request.getfuncargvalue), raise_on_error=True, verbose=0) class DoctestModule(pytest.File): def collect(self): doctest = py.std.doctest if self.fspath.basename == "conftest.py": module = self.config._conftest.importconftest(self.fspath) else: module = self.fspath.pyimport() # satisfy `FixtureRequest` constructor... 
self.funcargs = {} self._fixtureinfo = FuncFixtureInfo((), [], {}) fixture_request = FixtureRequest(self) doctest_globals = dict(getfixture=fixture_request.getfuncargvalue) # uses internal doctest module parsing mechanism finder = doctest.DocTestFinder() runner = doctest.DebugRunner(verbose=0, optionflags=doctest.ELLIPSIS) for test in finder.find(module, module.__name__, extraglobs=doctest_globals): if test.examples: # skip empty doctests yield DoctestItem(test.name, self, runner, test) pytest-2.5.1/tox.ini0000664000175000017500000000467512254002202013753 0ustar hpkhpk00000000000000[tox] distshare={homedir}/.tox/distshare envlist=flakes,py26,py27,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py32,py33,py27-xdist,py33-xdist,trial [testenv] changedir=testing commands= py.test --lsof -rfsxX --junitxml={envlogdir}/junit-{envname}.xml [] deps= nose [testenv:genscript] changedir=. commands= py.test --genscript=pytest1 [testenv:flakes] changedir= deps = pytest-flakes>=0.2 commands = py.test --flakes -m flakes _pytest testing [testenv:py27-xdist] changedir=. basepython=python2.7 deps=pytest-xdist mock nose commands= py.test -n3 -rfsxX \ --junitxml={envlogdir}/junit-{envname}.xml testing [testenv:py33-xdist] changedir=. basepython=python3.3 deps={[testenv:py27-xdist]deps} commands= py.test -n3 -rfsxX \ --junitxml={envlogdir}/junit-{envname}.xml testing [testenv:py27-pexpect] changedir=testing basepython=python2.7 deps=pexpect commands= py.test -rfsxX test_pdb.py test_terminal.py test_unittest.py [testenv:py33-pexpect] changedir=testing basepython=python2.7 deps={[testenv:py27-pexpect]deps} commands= py.test -rfsxX test_pdb.py test_terminal.py test_unittest.py [testenv:py27-nobyte] changedir=. basepython=python2.7 deps=pytest-xdist distribute=true setenv= PYTHONDONTWRITEBYTECODE=1 commands= py.test -n3 -rfsxX \ --junitxml={envlogdir}/junit-{envname}.xml {posargs:testing} [testenv:trial] changedir=. 
deps=twisted commands= py.test -rsxf \ --junitxml={envlogdir}/junit-{envname}.xml {posargs:testing/test_unittest.py} [testenv:doctest] changedir=. commands=py.test --doctest-modules _pytest deps= [testenv:doc] basepython=python changedir=doc/en deps=sphinx PyYAML commands= make clean make html [testenv:regen] basepython=python changedir=doc/en deps=sphinx PyYAML commands= rm -rf /tmp/doc-exec* #pip install pytest==2.3.4 make regen [testenv:py31] deps=nose>=1.0 [testenv:py31-xdist] deps=pytest-xdist commands= py.test -n3 -rfsxX \ --junitxml={envlogdir}/junit-{envname}.xml [] [testenv:jython] changedir=testing commands= {envpython} {envbindir}/py.test-jython \ -rfsxX --junitxml={envlogdir}/junit-{envname}2.xml [] [pytest] minversion=2.0 plugins=pytester #--pyargs --doctest-modules --ignore=.tox addopts= -rxsX rsyncdirs=tox.ini pytest.py _pytest testing python_files=test_*.py *_test.py testing/*/*.py python_classes=Test Acceptance python_functions=test pep8ignore = E401 E225 E261 E128 E124 E302 norecursedirs = .tox ja .hg pytest-2.5.1/setup.cfg0000664000175000017500000000026412254002202014247 0ustar hpkhpk00000000000000[build_sphinx] source-dir = doc/en/ build-dir = doc/build all_files = 1 [upload_sphinx] upload-dir = doc/en/build/html [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 pytest-2.5.1/MANIFEST.in0000664000175000017500000000015612254002202014164 0ustar hpkhpk00000000000000include CHANGELOG include README.rst include setup.py include tox.ini include LICENSE graft doc graft testing