pax_global_header00006660000000000000000000000064141250252640014513gustar00rootroot0000000000000052 comment=654f1b6220978e428c5b45a308db6a994208f8f0 testfixtures-6.18.3/000077500000000000000000000000001412502526400143635ustar00rootroot00000000000000testfixtures-6.18.3/.carthorse.yml000066400000000000000000000003421412502526400171550ustar00rootroot00000000000000carthorse: version-from: setup.py tag-format: "{version}" when: - version-not-tagged actions: - run: "sudo pip install -e .[build]" - run: "twine upload -u __token__ -p $PYPI_TOKEN dist/*" - create-tag testfixtures-6.18.3/.circleci/000077500000000000000000000000001412502526400162165ustar00rootroot00000000000000testfixtures-6.18.3/.circleci/config.yml000066400000000000000000000067521412502526400202200ustar00rootroot00000000000000version: 2.1 orbs: python: cjw296/python-ci@2 jobs: check-package: parameters: image: type: string extra_package: type: string default: "" imports: type: string default: "testfixtures" docker: - image: << parameters.image >> steps: - python/check-package: package: "testfixtures" extra_packages: << parameters.extra_package >> test: - run: name: "Check Imports" command: python -c "import << parameters.imports >>" common: &common jobs: - python/pip-run-tests: name: python27 image: circleci/python:2.7 - python/pip-run-tests: name: python36 # so we test the mock monkey patches: image: circleci/python:3.6.6 - python/pip-run-tests: name: python37 image: circleci/python:3.7 - python/pip-run-tests: name: python38 image: circleci/python:3.8 - python/pip-run-tests: name: python39 image: circleci/python:3.9 - python/pip-run-tests: name: python39-mock-backport image: circleci/python:3.9 extra_packages: "mock" - python/pip-run-tests: name: python27-django-1-9 image: circleci/python:2.7 extra_packages: "'django<1.10'" - python/pip-run-tests: name: python36-django-1-11 image: circleci/python:3.6 extra_packages: "'django<1.12'" - python/pip-run-tests: name: python39-django-latest image: 
circleci/python:3.9 - python/coverage: name: coverage image: circleci/python:3.7 requires: - python27 - python36 - python37 - python38 - python39 - python39-mock-backport - python27-django-1-9 - python36-django-1-11 - python39-django-latest - python/pip-docs: name: docs requires: - coverage - python/pip-setuptools-build-package: name: package requires: - docs filters: branches: only: master - check-package: name: check-package-python27 image: circleci/python:2.7 requires: - package - check-package: name: check-package-python39 image: circleci/python:3.9 requires: - package - check-package: name: check-package-python27-mock image: circleci/python:2.7 extra_package: mock imports: "testfixtures, testfixtures.mock" requires: - package - check-package: name: check-package-python39-mock image: circleci/python:3.9 extra_package: mock imports: "testfixtures, testfixtures.mock" requires: - package - check-package: name: check-package-python39-django image: circleci/python:3.9 extra_package: django imports: "testfixtures, testfixtures.django" requires: - package - python/release: name: release config: .carthorse.yml requires: - check-package-python27 - check-package-python27-mock - check-package-python39 - check-package-python39-mock - check-package-python39-django workflows: push: <<: *common periodic: <<: *common triggers: - schedule: cron: "0 1 * * *" filters: branches: only: master testfixtures-6.18.3/.coveragerc000066400000000000000000000004131412502526400165020ustar00rootroot00000000000000[run] source = testfixtures [report] exclude_lines = # the original exclude pragma: no cover # code executed only when tests fail 'No exception raised!' 
self\.fail\('Expected # example code that we don't want to cover with pragma statements guppy = testfixtures-6.18.3/.gitignore000066400000000000000000000003341412502526400163530ustar00rootroot00000000000000/.installed.cfg /bin/ /develop-eggs /dist /docs/_build /eggs /*.egg-info/ /parts/ *.pyc /.coverage /*.xml /.tox /htmlcov /include /lib /local /man /.Python desc.html pip-selfcheck.json .coverage.* .cache .pytest* /build testfixtures-6.18.3/.readthedocs.yml000066400000000000000000000002261412502526400174510ustar00rootroot00000000000000version: 2 python: version: 3.7 install: - method: pip path: . extra_requirements: - docs sphinx: fail_on_warning: true testfixtures-6.18.3/CHANGELOG.rst000066400000000000000000001057411412502526400164140ustar00rootroot00000000000000Changes ======= 6.18.3 (29 Sep 2021) -------------------- - Fix bug when using :func:`compare` on two regular expressions that have very long patterns. Thanks to Christoph Ludwig for the report. 6.18.2 (21 Sep 2021) -------------------- - Fix bug that meant :class:`LogCapture` didn't preserve or provide a clean testing environment for filters. Thanks to Jesse Rittner for the fix. 6.18.1 (20 Aug 2021) -------------------- - Fix bug when showing differences between mappings found by :func:`compare` when mismatching values contained the same number more than once. 6.18.0 (15 Jul 2021) -------------------- - Add support for lazy resolution of ``prefix`` and ``suffix`` when using :func:`compare`. 6.17.1 (14 Jan 2020) -------------------- - Fix bug where bug where duplicated entries in an ordered but partial :class:`SequenceComparison` could result in a failed match. 6.17.0 (16 Dec 2020) -------------------- - Add simpler flag support to :class:`StringComparison`. - Fix deprecation warning about invalid escape sequence. Thanks to Wim Glenn for the deprecation warning fix. 6.16.0 (9 Dec 2020) ------------------- - Simplify and clarify the documentation of timezones when using :class:`test_datetime`. 
- :doc:`api` has been re-arranged to make it easier to browse. - The ``strict`` parameter to :class:`Comparison` has been deprecated in favour of ``partial``. - Add :class:`SequenceComparison`, :class:`Subset` and :class:`Permutation` objects. - Add :class:`MappingComparison` objects. - Officially support Python 3.9. 6.15.0 (9 Oct 2020) ------------------- - Add support to :class:`LogCapture` for making sure log entries above a specified level have been checked. Thanks to Zoltan Farkas for the implementation. 6.14.2 (4 Sep 2020) ------------------- - Fix bug where ``ignore_eq`` had no effect on nested objects when using :func:`compare`. Thanks to Grégoire Payen de La Garanderie for the fix. 6.14.1 (20 Apr 2020) -------------------- - Fix bugs in comparison of :func:`~unittest.mock.call` objects where the :func:`repr` of the :func:`~unittest.mock.call` arguments were the same even when their attributes were not. 6.14.0 (24 Feb 2020) -------------------- - Add support for non-deterministic logging order when using :meth:`twisted.LogCapture`. 6.13.1 (20 Feb 2020) -------------------- - Fix for using :func:`compare` to compare two-element :func:`~unittest.mock.call` objects. Thanks to Daniel Fortunov for the fix. 6.13.0 (18 Feb 2020) -------------------- - Allow any attributes that need to be ignored to be specified directly when calling :func:`~testfixtures.comparison.compare_object`. This is handy when writing comparers for :func:`compare`. 6.12.1 (16 Feb 2020) -------------------- - Fix a bug that occured when using :func:`compare` to compare a string with a slotted object that had the same :func:`repr` as the string. 6.12.0 (6 Feb 2020) ------------------- - Add support for ``universal_newlines``, ``text``, ``encoding`` and ``errors`` to :class:`popen.MockPopen`, but only for Python 3. 6.11.0 (29 Jan 2020) -------------------- - :class:`decimal.Decimal` now has better representation when :func:`compare` displays a failed comparison, particularly on Python 2. 
- Add support to :func:`compare` for explicitly naming objects to be compared as ``x`` and ``y``. This allows symmetry with the ``x_label`` and ``y_label`` parameters that are now documented. - Restore ability for :class:`Comparison` to compare properties and methods, although these uses are not recommended. Thanks to Daniel Fortunov for all of the above. 6.10.3 (22 Nov 2019) -------------------- - Fix bug where new-style classes had their attributes checked with :func:`compare` even when they were of different types. 6.10.2 (15 Nov 2019) -------------------- - Fix bugs in :func:`compare` when comparing objects which have both ``__slots__`` and a ``__dict__``. 6.10.1 (1 Nov 2019) ------------------- - Fix edge case where string interning made dictionary comparison output much less useful. 6.10.0 (19 Jun 2019) -------------------- - Better feedback where objects do not :func:`compare` equal but do have the same representation. 6.9.0 (10 Jun 2019) ------------------- - Fix deprecation warning relating to :func:`getargspec`. - Improve :doc:`mocking ` docs. - Added ``strip_whitespace`` option to :class:`OutputCapture`. - When ``separate`` is used with :class:`OutputCapture`, differences in ``stdout`` and ``stderr`` are now given in the same :class:`AssertionError`. - :class:`ShouldRaise` no longer catches exceptions that are not of the required type. - Fixed a problem that resulted in unhelpful :func:`compare` failures when :func:`~unittest.mock.call` was involved and Python 3.6.7 was used. Thanks to Łukasz Rogalski for the deprecation warning fix. Thanks to Wim Glenn for the :class:`ShouldRaise` idea. 6.8.2 (4 May 2019) ------------------ - Fix handling of the latest releases of the :mod:`mock` backport. 6.8.1 (2 May 2019) ------------------ - Fix bogus import in :class:`OutputCapture`. 6.8.0 (2 May 2019) ------------------ - Allow :class:`OutputCapture` to capture the underlying file descriptors for :attr:`sys.stdout` and :attr:`sys.stderr`. 
6.7.1 (29 Apr 2019) ------------------- - Silence :class:`DeprecationWarning` relating to ``collections.abc`` on Python 3.7. Thanks to Tom Hendrikx for the fix. 6.7.0 (11 Apr 2019) ------------------- - Add :meth:`twisted.LogCapture.raise_logged_failure` debugging helper. 6.6.2 (22 Mar 2019) ------------------- - :meth:`popen.MockPopen.set_command` is now symmetrical with :class:`popen.MockPopen` process instantiation in that both can be called with either lists or strings, in the same way as :class:`subprocess.Popen`. 6.6.1 (13 Mar 2019) ------------------- - Fixed bugs where using :attr:`not_there` to ensure a key or attribute was not there but would be set by a test would result in the test attribute or key being left behind. - Add support for comparing :func:`~functools.partial` instances and fix comparison of functions and other objects where ``vars()`` returns an empty :class:`dict`. 6.6.0 (22 Feb 2019) ------------------- - Add the ability to ignore attributes of particular object types when using :func:`compare`. 6.5.2 (18 Feb 2019) ------------------- - Fix bug when :func:`compare` was used with objects that had ``__slots__`` inherited from a base class but where their ``__slots__`` was an empty sequence. 6.5.1 (18 Feb 2019) ------------------- - Fix bug when :func:`compare` was used with objects that had ``__slots__`` inherited from a base class. 6.5.0 (28 Jan 2019) ------------------- - Experimental support for making assertions about events logged with Twisted's logging framework. 6.4.3 (10 Jan 2019) ------------------- - Fix problems on Python 2 when the rolling backport of `mock`__ was not installed. __ https://mock.readthedocs.io 6.4.2 (9 Jan 2019) ------------------ - Fixed typo in the ``executable`` parameter name for :class:`~testfixtures.popen.MockPopen`. - Fixed :func:`~unittest.mock.call` patching to only patch when needed. - Fixed :func:`compare` with :func:`~unittest.mock.call` objects for the latest Python releases. 
6.4.1 (24 Dec 2018) ------------------- - Fix bug when using :func:`unittest.mock.patch` and any of the testfixtures decorators at the same time and where the object being patched in was not hashable. 6.4.0 (19 Dec 2018) ------------------- - Add official support for Python 3.7. - Drop official support for Python 3.5. - Introduce a facade for :mod:`unittest.mock` at :mod:`testfixtures.mock`, including an important bug fix for :func:`~unittest.mock.call` objects. - Better feedback when :func:`~unittest.mock.call` comparisons fail when using :func:`compare`. - A re-working of :class:`~testfixtures.popen.MockPopen` to enable it to handle multiple processes being active at the same time. - Fixes to :doc:`datetime` documentation. Thanks to Augusto Wagner Andreoli for his work on the :doc:`datetime` documentation. 6.3.0 (4 Sep 2018) ------------------ - Allow the behaviour specified with :meth:`~testfixtures.popen.MockPopen.set_command` to be a callable meaning that mock behaviour can now be dynamic based on the command executed and whatever was sent to ``stdin``. - Make :class:`~testfixtures.popen.MockPopen` more accurately reflect :class:`subprocess.Popen` on Python 3 by adding ``timeout`` parameters to :meth:`~testfixtures.popen.MockPopen.wait` and :meth:`~testfixtures.popen.MockPopen.communicate` along with some other smaller changes. Thanks to Tim Davies for his work on :class:`~testfixtures.popen.MockPopen`. 6.2.0 (14 Jun 2018) ------------------- - Better rendering of differences between :class:`bytes` when using :func:`compare` on Python 3. 6.1.0 (6 Jun 2018) ------------------ - Support filtering for specific warnings with :class:`ShouldWarn`. 6.0.2 (2 May 2018) ------------------ - Fix nasty bug where objects that had neither ``__dict__`` nor ``__slots__`` would always be considered equal by :func:`compare`. 6.0.1 (17 April 2018) --------------------- - Fix a bug when comparing equal :class:`set` instances using :func:`compare` when ``strict==True``. 
6.0.0 (27 March 2018) --------------------- - :func:`compare` will now handle objects that do not natively support equality or inequality and will treat these objects as equal if they are of the same type and have the same attributes as found using :func:`vars` or ``__slots__``. This is a change in behaviour which, while it could conceivably cause tests that are currently failing to pass, should not cause any currently passing tests to start failing. - Add support for writing to the ``stdin`` of :class:`~testfixtures.popen.MockPopen` instances. - The default behaviour of :class:`~testfixtures.popen.MockPopen` can now be controlled by providing a callable. - :meth:`LogCapture.actual` is now part of the documented public interface. - Add :meth:`LogCapture.check_present` to help with assertions about a sub-set of messages logged along with those that are logged in a non-deterministic order. - :class:`Comparison` now supports objects with ``__slots__``. - Added :class:`ShouldAssert` as a simpler tool for testing test helpers. - Changed the internals of the various decorators testfixtures provides such that they can be used in conjunction with :func:`unittest.mock.patch` on the same test method or function. - Changed the internals of :class:`ShouldRaise` and :class:`Comparison` to make use of :func:`compare` and so provide nested comparisons with better feedback. This finally allows :class:`ShouldRaise` to deal with Django's :class:`~django.core.exceptions.ValidationError`. - Added handling of self-referential structures to :func:`compare` by treating all but the first occurence as equal. Another change needed to support Django's insane :class:`~django.core.exceptions.ValidationError`. Thanks to Hamish Downer and Tim Davies for their work on :class:`~testfixtures.popen.MockPopen`. Thanks to Wim Glenn and Daniel Fortunov for their help reviewing some of the more major changes. 
5.4.0 (25 January 2018) ----------------------- - Add explicit support for :class:`~unittest.mock.Mock` to :func:`compare`. 5.3.1 (21 November 2017) ------------------------ - Fix missing support for the `start_new_session` parameter to :class:`~testfixtures.popen.MockPopen`. 5.3.0 (28 October 2017) ----------------------- - Add pytest traceback hiding for :meth:`TempDirectory.compare`. - Add warnings that :func:`log_capture`, :func:`tempdir` and :func:`replace` are not currently compatible with pytest's fixtures mechanism. - Better support for ``stdout`` or ``stderr`` *not* being set to ``PIPE`` when using :class:`~testfixtures.popen.MockPopen`. - Add support to :class:`~testfixtures.popen.MockPopen` for using :class:`subprocess.Popen` as a context manager in Python 3. - Add support to :class:`~testfixtures.popen.MockPopen` for ``stderr=STDOUT``. Thanks to Tim Davies for his work on :class:`~testfixtures.popen.MockPopen`. 5.2.0 (3 September 2017) ------------------------ - :class:`test_datetime` and :class:`test_time` now accept a :class:`~datetime.datetime` instance during instantiation to set the initial value. - :class:`test_date` now accepts a :class:`~datetime.date` instance during instantiation to set the initial value. - Relax the restriction on adding, setting or instantiating :class:`test_datetime` with `tzinfo` such that if the `tzinfo` matches the one configured, then it's okay to add. This means that you can now instantiate a :class:`test_datetime` with an existing :class:`~datetime.datetime` instance that has `tzinfo` set. - :func:`testfixtures.django.compare_model` now ignores :class:`many to many ` fields rather than blowing up on them. - Drop official support for Python 3.4, although things should continue to work. 5.1.1 (8 June 2017) ------------------- - Fix support for Django 1.9 in :func:`testfixtures.django.compare_model`. 
5.1.0 (8 June 2017) ------------------- - Added support for including non-edit able fields to the :func:`comparer ` used by :func:`compare` when comparing :doc:`django ` :class:`~django.db.models.Model` instances. 5.0.0 (5 June 2017) ------------------- - Move from `nose`__ to `pytest`__ for running tests. __ http://nose.readthedocs.io/en/latest/ __ https://docs.pytest.org/en/latest/ - Switch from `manuel`__ to `sybil`__ for checking examples in documentation. This introduces a backwards incompatible change in that :class:`~testfixtures.sybil.FileParser` replaces the Manuel plugin that is no longer included. __ http://packages.python.org/manuel/ __ http://sybil.readthedocs.io/en/latest/ - Add a 'tick' method to :meth:`test_datetime `, :meth:`test_date ` and :meth:`test_time `, to advance the returned point in time, which is particularly helpful when ``delta`` is set to zero. 4.14.3 (15 May 2017) -------------------- - Fix build environment bug in ``.travis.yml`` that caused bad tarballs. 4.14.2 (15 May 2017) -------------------- - New release as it looks like Travis mis-built the 4.14.1 tarball. 4.14.1 (15 May 2017) -------------------- - Fix mis-merge. 4.14.0 (15 May 2017) -------------------- - Added helpers for testing with :doc:`django ` :class:`~django.db.models.Model` instances. 4.13.5 (1 March 2017) ------------------------- - :func:`compare` now correctly compares nested empty dictionaries when using ``ignore_eq=True``. 4.13.4 (6 February 2017) ------------------------ - Keep the `Reproducible Builds`__ guys happy. __ https://reproducible-builds.org/ 4.13.3 (13 December 2016) ------------------------- - :func:`compare` now better handles equality comparison with ``ignore_eq=True`` when either of the objects being compared cannot be hashed. 4.13.2 (16 November 2016) ------------------------- - Fixed a bug where a :class:`LogCapture` wouldn't be cleared when used via :func:`log_capture` on a base class and sub class execute the same test. 
Thanks to "mlabonte" for the bug report. 4.13.1 (2 November 2016) ------------------------ - When ``ignore_eq`` is used with :func:`compare`, fall back to comparing by hash if not type-specific comparer can be found. 4.13.0 (2 November 2016) ------------------------ - Add support to :func:`compare` for ignoring broken ``__eq__`` implementations. 4.12.0 (18 October 2016) ------------------------ - Add support for specifying a callable to extract rows from log records when using :class:`LogCapture`. - Add support for recursive comparison of log messages with :class:`LogCapture`. 4.11.0 (12 October 2016) ------------------------ - Allow the attributes returned in :meth:`LogCapture.actual` rows to be specified. - Allow a default to be specified for encoding in :meth:`TempDirectory.read` and :meth:`TempDirectory.write`. 4.10.1 (5 September 2016) ------------------------- - Better docs for :meth:`TempDirectory.compare`. - Remove the need for expected paths supplied to :meth:`TempDirectory.compare` to be in sorted order. - Document a good way of restoring ``stdout`` when in a debugger. - Fix handling of trailing slashes in :meth:`TempDirectory.compare`. Thanks to Maximilian Albert for the :meth:`TempDirectory.compare` docs. 4.10.0 (17 May 2016) -------------------- - Fixed examples in documentation broken in 4.5.1. - Add :class:`RangeComparison` for comparing against values that fall in a range. - Add :meth:`~popen.MockPopen.set_default` to :class:`~popen.MockPopen`. Thanks to Asaf Peleg for the :class:`RangeComparison` implementation. 4.9.1 (19 February 2016) ------------------------ - Fix for use with PyPy, broken since 4.8.0. Thanks to Nicola Iarocci for the pull request to fix. 4.9.0 (18 February 2016) ------------------------ - Added the `suffix` parameter to :func:`compare` to allow failure messages to include some additional context. - Update package metadata to indicate Python 3.5 compatibility. Thanks for Felix Yan for the metadata patch. 
Thanks to Wim Glenn for the suffix patch. 4.8.0 (2 February 2016) ----------------------- - Introduce a new :class:`Replace` context manager and make :class:`Replacer` callable. This gives more succinct and easy to read mocking code. - Add :class:`ShouldWarn` and :class:`ShouldNotWarn` context managers. 4.7.0 (10 December 2015) ------------------------ - Add the ability to pass ``raises=False`` to :func:`compare` to just get the resulting message back rather than having an exception raised. 4.6.0 (3 December 2015) ------------------------ - Fix a bug that mean symlinked directories would never show up when using :meth:`TempDirectory.compare` and friends. - Add the ``followlinks`` parameter to :meth:`TempDirectory.compare` to indicate that symlinked or hard linked directories should be recursed into when using ``recursive=True``. 4.5.1 (23 November 2015) ------------------------ - Switch from :class:`cStringIO` to :class:`StringIO` in :class:`OutputCapture` to better handle unicode being written to `stdout` or `stderr`. Thanks to "tell-k" for the patch. 4.5.0 (13 November 2015) ------------------------ - :class:`LogCapture`, :class:`OutputCapture` and :class:`TempDirectory` now explicitly show what is expected versus actual when reporting differences. Thanks to Daniel Fortunov for the pull request. 4.4.0 (1 November 2015) ----------------------- - Add support for labelling the arguments passed to :func:`compare`. - Allow ``expected`` and ``actual`` keyword parameters to be passed to :func:`compare`. - Fix ``TypeError: unorderable types`` when :func:`compare` found multiple differences in sets and dictionaries on Python 3. - Add official support for Python 3.5. - Drop official support for Python 2.6. Thanks to Daniel Fortunov for the initial ideas for explicit ``expected`` and ``actual`` support in :func:`compare`. 4.3.3 (15 September 2015) ------------------------- - Add wheel distribution to release. 
- Attempt to fix up various niggles from the move to Travis CI for doing releases. 4.3.2 (15 September 2015) ------------------------- - Fix broken 4.3.1 tag. 4.3.1 (15 September 2015) ------------------------- - Fix build problems introduced by moving the build process to Travis CI. 4.3.0 (15 September 2015) ------------------------- - Add :meth:`TempDirectory.compare` with a cleaner, more explicit API that allows comparison of only the files in a temporary directory. - Deprecate :meth:`TempDirectory.check`, :meth:`TempDirectory.check_dir` and :meth:`TempDirectory.check_all` - Relax absolute-path rules so that if it's inside the :class:`TempDirectory`, it's allowed. - Allow :class:`OutputCapture` to separately check output to ``stdout`` and ``stderr``. 4.2.0 (11 August 2015) ---------------------- - Add :class:`~testfixtures.popen.MockPopen`, a mock helpful when testing code that uses :class:`subprocess.Popen`. - :class:`ShouldRaise` now subclasses :class:`object`, so that subclasses of it may use :meth:`super()`. - Drop official support for Python 3.2. Thanks to BATS Global Markets for donating the code for :class:`~testfixtures.popen.MockPopen`. 4.1.2 (30 January 2015) ----------------------- - Clarify documentation for ``name`` parameter to :class:`LogCapture`. - :class:`ShouldRaise` now shows different output when two exceptions have the same representation but still differ. - Fix bug that could result in a :class:`dict` comparing equal to a :class:`list`. Thanks to Daniel Fortunov for the documentation clarification. 4.1.1 (30 October 2014) ----------------------- - Fix bug that prevented logger propagation to be controlled by the :class:`log_capture` decorator. Thanks to John Kristensen for the fix. 4.1.0 (14 October 2014) ----------------------- - Fix :func:`compare` bug when :class:`dict` instances with :class:`tuple` keys were not equal. - Allow logger propagation to be controlled by :class:`LogCapture`. 
- Enabled disabled loggers if a :class:`LogCapture` is attached to them. Thanks to Daniel Fortunov for the :func:`compare` fix. 4.0.2 (10 September 2014) ------------------------- - Fix "maximum recursion depth exceeded" when comparing a string with bytes that did not contain the same character. 4.0.1 (4 August 2014) --------------------- - Fix bugs when string compared equal and options to :func:`compare` were used. - Fix bug when strictly comparing two nested structures containing identical objects. 4.0.0 (22 July 2014) -------------------- - Moved from buildout to virtualenv for development. - The ``identity`` singleton is no longer needed and has been removed. - :func:`compare` will now work recursively on data structures for which it has registered comparers, giving more detailed feedback on nested data structures. Strict comparison will also be applied recursively. - Re-work the interfaces for using custom comparers with :func:`compare`. - Better feedback when comparing :func:`collections.namedtuple` instances. - Official support for Python 3.4. Thanks to Yevgen Kovalienia for the typo fix in :doc:`datetime`. 3.1.0 (25 May 2014) ------------------- - Added :class:`RoundComparison` helper for comparing numerics to a specific precision. - Added ``unless`` parameter to :class:`ShouldRaise` to cover some very specific edge cases. - Fix missing imports that showed up :class:`TempDirectory` had to do the "convoluted folder delete" dance on Windows. Thanks to Jon Thompson for the :class:`RoundComparison` implementation. Thanks to Matthias Lehmann for the import error reports. 3.0.2 (7 April 2014) -------------------- - Document :attr:`ShouldRaise.raised` and make it part of the official API. - Fix rare failures when cleaning up :class:`TempDirectory` instances on Windows. 3.0.1 (10 June 2013) -------------------- - Some documentation tweaks and clarifications. - Fixed a bug which masked exceptions when using :func:`compare` with a broken generator. 
- Fixed a bug when comparing a generator with a non-generator. - Ensure :class:`LogCapture` cleans up global state it may effect. - Fixed replacement of static methods using a :class:`Replacer`. 3.0.0 (5 March 2013) -------------------- - Added compatibility with Python 3.2 and 3.3. - Dropped compatibility with Python 2.5. - Removed support for the following obscure uses of :class:`should_raise`: .. invisible-code-block: python from testfixtures.mock import MagicMock should_raise = x = MagicMock() .. code-block:: python should_raise(x, IndexError)[1] should_raise(x, KeyError)['x'] - Dropped the `mode` parameter to :meth:`TempDirectory.read`. - :meth:`TempDirectory.makedir` and :meth:`TempDirectory.write` no longer accept a `path` parameter. - :meth:`TempDirectory.read` and :meth:`TempDirectory.write` now accept an `encoding` parameter to control how non-byte data is decoded and encoded respectively. - Added the `prefix` parameter to :func:`compare` to allow failure messages to be made more informative. - Fixed a problem when using sub-second deltas with :func:`test_time`. 2.3.5 (13 August 2012) ---------------------- - Fixed a bug in :func:`~testfixtures.comparison.compare_dict` that mean the list of keys that were the same was returned in an unsorted order. 2.3.4 (31 January 2012) ----------------------- - Fixed compatibility with Python 2.5 - Fixed compatibility with Python 2.7 - Development model moved to continuous integration using Jenkins. - Introduced `Tox`__ based testing to ensure packaging and dependencies are as expected. __ http://tox.testrun.org/latest/ - 100% line and branch coverage with tests. - Mark :class:`test_datetime`, :class:`test_date` and :class:`test_time` such that nose doesn't mistake them as tests. 2.3.3 (12 December 2011) ------------------------- - Fixed a bug where when a target was replaced more than once using a single :class:`Replacer`, :meth:`~Replacer.restore` would not correctly restore the original. 
2.3.2 (10 November 2011) ------------------------- - Fixed a bug where attributes and keys could not be removed by a :class:`Replacer` as described in :ref:`removing_attr_and_item` if the attribute or key might not be there, such as where a test wants to ensure an ``os.environ`` variable is not set. 2.3.1 (8 November 2011) ------------------------- - Move to use `nose `__ for running the testfixtures unit tests. - Fixed a bug where :meth:`tdatetime.now` returned an instance of the wrong type when `tzinfo` was passed in :ref:`strict mode `. 2.3.0 (11 October 2011) ------------------------- - :class:`Replacer`, :class:`TempDirectory`, :class:`LogCapture` and :class:`~components.TestComponents` instances will now warn if the process they are created in exits without them being cleaned up. Instances of these classes should be cleaned up at the end of each test and these warnings serve to point to a cause for possible mysterious failures elsewhere. 2.2.0 (4 October 2011) ------------------------- - Add a :ref:`strict mode ` to :class:`test_datetime` and :class:`test_date`. When used, instances returned from the mocks are instances of those mocks. The default behaviour is now to return instances of the real :class:`~datetime.datetime` and :class:`~datetime.date` classes instead, which is usually much more useful. 2.1.0 (29 September 2011) ------------------------- - Add a :ref:`strict mode ` to :func:`compare`. When used, it ensures that the values compared are not only equal but also of the same type. This mode is not used by default, and the default mode restores the more commonly useful functionality where values of similar types but that aren't equal give useful feedback about differences. 2.0.1 (23 September 2011) ------------------------- - add back functionality to allow comparison of generators with non-generators. 
2.0.0 (23 September 2011) ------------------------- - :func:`compare` now uses a registry of comparers that can be modified either by passing a `registry` option to :func:`compare` or, globally, using the :func:`~comparison.register` function. - added a comparer for :class:`set` instances to :func:`compare`. - added a new `show_whitespace` parameter to :func:`~comparison.compare_text`, the comparer used when comparing strings and unicodes with :func:`compare`. - The internal queue for :class:`test_datetime` is now considered to be in local time. This has implication on the values returned from both :meth:`~tdatetime.now` and :meth:`~tdatetime.utcnow` when `tzinfo` is passed to the :class:`test_datetime` constructor. - :meth:`set` and :meth:`add` on :class:`test_date`, :class:`test_datetime` and :class:`test_time` now accept instances of the appropriate type as an alternative to just passing in the parameters to create the instance. - Refactored the monolithic ``__init__.py`` into modules for each type of functionality. 1.12.0 (16 August 2011) ----------------------- - Add a :attr:`~OutputCapture.captured` property to :class:`OutputCapture` so that more complex assertion can be made about the output that has been captured. - :class:`OutputCapture` context managers can now be temporarily disabled using their :meth:`~OutputCapture.disable` method. - Logging can now be captured only when it exceeds a specified logging level. - The handling of timezones has been reworked in both :func:`test_datetime` and :func:`test_time`. This is not backwards compatible but is much more useful and correct. 1.11.3 (3 August 2011) ---------------------- - Fix bugs where various :meth:`test_date`, :meth:`test_datetime` and :meth:`test_time` methods didn't accept keyword parameters. 1.11.2 (28 July 2011) --------------------- - Fix for 1.10 and 1.11 releases that didn't include non-.py files as a result of the move from subversion to git. 
1.11.1 (28 July 2011) --------------------- - Fix bug where :meth:`tdatetime.now` didn't accept the `tz` parameter that :meth:`datetime.datetime.now` did. 1.11.0 (27 July 2011) --------------------- - Give more useful output when comparing dicts and their subclasses. - Turn :class:`should_raise` into a decorator form of :class:`ShouldRaise` rather than the rather out-moded wrapper function that it was. 1.10.0 (19 July 2011) --------------------- - Remove dependency on :mod:`zope.dottedname`. - Implement the ability to mock out :class:`dict` and :class:`list` items using :class:`~testfixtures.Replacer` and :func:`~testfixtures.replace`. - Implement the ability to remove attributes and :class:`dict` items using :class:`~testfixtures.Replacer` and :func:`~testfixtures.replace`. 1.9.2 (20 April 2011) --------------------- - Fix for issue #328: :meth:`~tdatetime.utcnow` of :func:`test_datetime` now returns items from the internal queue in the same way as :meth:`~tdatetime.now`. 1.9.1 (11 March 2011) ------------------------ - Fix bug when :class:`ShouldRaise` context managers incorrectly reported what exception was incorrectly raised when the incorrectly raised exception was a :class:`KeyError`. 1.9.0 (11 February 2011) ------------------------ - Added :class:`~components.TestComponents` for getting a sterile registry when testing code that uses :mod:`zope.component`. 1.8.0 (14 January 2011) ----------------------- - Added full Sphinx-based documentation. - added a `Manuel `__ plugin for reading and writing files into a :class:`TempDirectory`. - any existing log handlers present when a :class:`LogCapture` is installed for a particular logger are now removed. - fix the semantics of :class:`should_raise`, which should always expect an exception to be raised! - added the :class:`ShouldRaise` context manager. - added recursive support to :meth:`TempDirectory.listdir` and added the new :meth:`TempDirectory.check_all` method. 
- added support for forward-slash separated paths to all relevant :class:`TempDirectory` methods. - added :meth:`TempDirectory.getpath` method. - allow files and directories to be ignored by a regular expression specification when using :class:`TempDirectory`. - made :class:`Comparison` objects work when the attributes expected might be class attributes. - re-implement :func:`test_time` so that it uses the correct way to get timezone-less time. - added :meth:`~tdatetime.set` along with `delta` and `delta_type` parameters to :func:`test_date`, :func:`test_datetime` and :func:`test_time`. - allow the date class returned by the :meth:`tdatetime.date` method to be configured. - added the :class:`OutputCapture` context manager. - added the :class:`StringComparison` class. - added options to ignore trailing whitespace and blank lines when comparing multi-line strings with :func:`compare`. - fixed bugs in the handling of some exception types when using :class:`Comparison`, :class:`ShouldRaise` or :class:`should_raise`. - changed :func:`wrap` to correctly set __name__, along with some other attributes, which should help when using the decorators with certain testing frameworks. 1.7.0 (20 January 2010) ----------------------- - fixed a bug where the @replace decorator passed a classmethod rather than the replacment to the decorated callable when replacing a classmethod - added set method to test_date, test_datetime and test_time to allow setting the parameters for the next instance to be returned. - added delta and delta_type parameters to test_date,test_datetime and test_time to control the intervals between returned instances. 1.6.2 (23 September 2009) ------------------------- - changed Comparison to use __eq__ and __ne__ instead of the deprecated __cmp__ - documented that order matters when using Comparisons with objects that implement __eq__ themselves, such as instances of Django models. 
1.6.1 (06 September 2009) ------------------------- - @replace and Replacer.replace can now replace attributes that may not be present, provided the `strict` parameter is passed as False. - should_raise now catches BaseException rather than Exception so raising of SystemExit and KeyboardInterrupt can be tested. 1.6.0 (09 May 2009) ------------------- - added support for using TempDirectory, Replacer and LogCapture as context managers. - fixed test failure in Python 2.6. 1.5.4 (11 Feb 2009) ------------------- - fix bug where should_raise didn't complain when no exception was raised but one was expected. - clarified that the return of a should_raise call will be None in the event that an exception is raised but no expected exception is specified. 1.5.3 (17 Dec 2008) ------------------- - should_raise now supports methods other than __call__ 1.5.2 (14 Dec 2008) ------------------- - added `makedir` and `check_dir` methods to TempDirectory and added support for sub directories to `read` and `write` 1.5.1 (12 Dec 2008) ------------------- - added `path` parameter to `write` method of TempDirectory so that the full path of the file written can be easilly obtained 1.5.0 (12 Dec 2008) ------------------- - added handy `read` and `write` methods to TempDirectory for creating and reading files in the temporary directory - added support for rich comparison of objects that don't support vars() 1.4.0 (12 Dec 2008) ------------------- - improved representation of failed Comparison - improved representation of failed compare with sequences 1.3.1 (10 Dec 2008) ------------------- - fixed bug that occurs when directory was deleted by a test that use tempdir or TempDirectory 1.3.0 (9 Dec 2008) ------------------ - added TempDirectory helper - added tempdir decorator 1.2.0 (3 Dec 2008) ------------------ - LogCaptures now auto-install on creation unless configured otherwise - LogCaptures now have a clear method - LogCaptures now have a class method uninstall_all that uninstalls all 
instances of LogCapture. Handy for a tearDown method in doctests. 1.1.0 (3 Dec 2008) ------------------ - add support to Comparisons for only comparing some attributes - move to use zope.dottedname 1.0.0 (26 Nov 2008) ------------------- - Initial Release testfixtures-6.18.3/LICENSE.txt000066400000000000000000000021331412502526400162050ustar00rootroot00000000000000Copyright (c) 2008-2015 Simplistix Ltd Copyright (c) 2015-2020 Chris Withers Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. testfixtures-6.18.3/README.rst000066400000000000000000000033451412502526400160570ustar00rootroot00000000000000Testfixtures ============ |CircleCI|_ |Docs|_ .. |CircleCI| image:: https://circleci.com/gh/simplistix/testfixtures/tree/master.svg?style=shield .. _CircleCI: https://circleci.com/gh/simplistix/testfixtures/tree/master .. |Docs| image:: https://readthedocs.org/projects/testfixtures/badge/?version=latest .. 
_Docs: http://testfixtures.readthedocs.org/en/latest/ Testfixtures is a collection of helpers and mock objects that are useful when writing automated tests in Python. The areas of testing this package can help with are listed below: **Comparing objects and sequences** Better feedback when the results aren't as you expected along with support for comparison of objects that don't normally support comparison and comparison of deeply nested datastructures. **Mocking out objects and methods** Easy to use ways of stubbing out objects, classes or individual methods for both doc tests and unit tests. Special helpers are provided for testing with dates and times. **Testing logging** Helpers for capturing logging output in both doc tests and unit tests. **Testing stream output** Helpers for capturing stream output, such as that from print statements, and making assertion about it. **Testing with files and directories** Support for creating and checking files and directories in sandboxes for both doc tests and unit tests. **Testing exceptions and warnings** Easy to use ways of checking that a certain exception is raised, or a warning is issued, even down the to the parameters provided. **Testing subprocesses** A handy mock for testing code that uses subprocesses. **Testing when using django** Helpers for comparing instances of django models. **Testing when using zope.component** An easy to use sterile component registry. testfixtures-6.18.3/docs/000077500000000000000000000000001412502526400153135ustar00rootroot00000000000000testfixtures-6.18.3/docs/Makefile000066400000000000000000000050201412502526400167500ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD ?= sphinx-build PAPER = # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf _build/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html @echo @echo "Build finished. The HTML pages are in _build/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml @echo @echo "Build finished. The HTML pages are in _build/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in _build/htmlhelp." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex @echo @echo "Build finished; the LaTeX files are in _build/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes @echo @echo "The overview file is in _build/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in _build/linkcheck/output.txt." 
doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in _build/doctest/output.txt." testfixtures-6.18.3/docs/api.txt000066400000000000000000000332201412502526400166250ustar00rootroot00000000000000API Reference ============= .. currentmodule:: testfixtures Comparisons ----------- .. autofunction:: compare(x, y, prefix=None, suffix=None, raises=True, recursive=True, strict=False, comparers=None, **kw) .. autoclass:: Comparison .. autoclass:: MappingComparison :members: .. autoclass:: Permutation :members: .. autoclass:: RoundComparison :members: .. autoclass:: RangeComparison :members: .. autoclass:: SequenceComparison :members: .. autoclass:: Subset :members: .. autoclass:: StringComparison :members: testfixtures.comparison ~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: testfixtures.comparison .. autofunction:: testfixtures.comparison.register .. autofunction:: testfixtures.comparison.compare_simple .. autofunction:: testfixtures.comparison.compare_object .. autofunction:: testfixtures.comparison.compare_exception .. autofunction:: testfixtures.comparison.compare_with_type .. autofunction:: testfixtures.comparison.compare_sequence .. autofunction:: testfixtures.comparison.compare_generator .. autofunction:: testfixtures.comparison.compare_tuple .. autofunction:: testfixtures.comparison.compare_dict .. autofunction:: testfixtures.comparison.compare_set .. autofunction:: testfixtures.comparison.compare_text .. currentmodule:: testfixtures Capturing --------- .. autoclass:: LogCapture :members: .. autofunction:: log_capture .. autoclass:: OutputCapture :members: Mocking ------- .. autoclass:: Replace :members: .. autoclass:: Replacer :members: .. autofunction:: replace .. 
function:: test_date(year=2001, month=1, day=1, delta=None, delta_type='days', strict=False) A function that returns a mock object that can be used in place of the :class:`datetime.date` class but where the return value of :meth:`~datetime.date.today` can be controlled. If a single positional argument of ``None`` is passed, then the queue of dates to be returned will be empty and you will need to call :meth:`~tdate.set` or :meth:`~tdate.add` before calling :meth:`~tdate.today`. If an instance of :class:`~datetime.date` is passed as a single positional argument, that will be used as the first date returned by :meth:`~datetime.date.today` :param year: An optional year used to create the first date returned by :meth:`~datetime.date.today`. :param month: An optional month used to create the first date returned by :meth:`~datetime.date.today`. :param day: An optional day used to create the first date returned by :meth:`~datetime.date.today`. :param delta: The size of the delta to use between values returned from :meth:`~datetime.date.today`. If not specified, it will increase by 1 with each call to :meth:`~datetime.date.today`. :param delta_type: The type of the delta to use between values returned from :meth:`~datetime.date.today`. This can be any keyword parameter accepted by the :class:`~datetime.timedelta` constructor. :param strict: If ``True``, calling the mock class and any of its methods will result in an instance of the mock being returned. If ``False``, the default, an instance of :class:`~datetime.date` will be returned instead. The mock returned will behave exactly as the :class:`datetime.date` class with the exception of the following members: .. method:: tdate.add(*args, **kw) This will add the :class:`datetime.date` created from the supplied parameters to the queue of dates to be returned by :meth:`~tdate.today`. An instance of :class:`~datetime.date` may also be passed as a single positional argument. .. 
method:: tdate.set(*args, **kw) This will set the :class:`datetime.date` created from the supplied parameters as the next date to be returned by :meth:`~tdate.today`, regardless of any dates in the queue. An instance of :class:`~datetime.date` may also be passed as a single positional argument. .. method:: tdate.tick(*args, **kw) This method should be called either with a :class:`~datetime.timedelta` as a positional argument, or with keyword parameters that will be used to construct a :class:`~datetime.timedelta`. The :class:`~datetime.timedelta` will be used to advance the next date to be returned by :meth:`~tdate.today`. .. classmethod:: tdate.today() This will return the next supplied or calculated date from the internal queue, rather than the actual current date. .. function:: test_datetime(year=2001, month=1, day=1, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, delta=None, delta_type='seconds', date_type=datetime.date, strict=False) A function that returns a mock object that can be used in place of the :class:`datetime.datetime` class but where the return value of :meth:`~tdatetime.now` can be controlled. If a single positional argument of ``None`` is passed, then the queue of datetimes to be returned will be empty and you will need to call :meth:`~tdatetime.set` or :meth:`~tdatetime.add` before calling :meth:`~tdatetime.now` or :meth:`~tdatetime.utcnow`. If an instance of :class:`~datetime.datetime` is passed as a single positional argument, that will be used as the first date returned by :meth:`~tdatetime.now` :param year: An optional year used to create the first datetime returned by :meth:`~tdatetime.now`. :param month: An optional month used to create the first datetime returned by :meth:`~tdatetime.now`. :param day: An optional day used to create the first datetime returned by :meth:`~tdatetime.now`. :param hour: An optional hour used to create the first datetime returned by :meth:`~tdatetime.now`. 
:param minute: An optional minute used to create the first datetime returned by :meth:`~tdatetime.now`. :param second: An optional second used to create the first datetime returned by :meth:`~tdatetime.now`. :param microsecond: An optional microsecond used to create the first datetime returned by :meth:`~tdatetime.now`. :param tzinfo: An optional :class:`datetime.tzinfo`, see :ref:`timezones`. :param delta: The size of the delta to use between values returned from :meth:`~tdatetime.now`. If not specified, it will increase by 1 with each call to :meth:`~tdatetime.now`. :param delta_type: The type of the delta to use between values returned from :meth:`~tdatetime.now`. This can be any keyword parameter accepted by the :class:`~datetime.timedelta` constructor. :param date_type: The type to use for the return value of the :meth:`~datetime.datetime.date` method. This can help with gotchas that occur when type checking if performed on values returned by the mock's :meth:`~datetime.datetime.date` method. :param strict: If ``True``, calling the mock class and any of its methods will result in an instance of the mock being returned. If ``False``, the default, an instance of :class:`~datetime.datetime` will be returned instead. The mock returned will behave exactly as the :class:`datetime.datetime` class with the exception of the following members: .. method:: tdatetime.add(*args, **kw) This will add the :class:`datetime.datetime` created from the supplied parameters to the queue of datetimes to be returned by :meth:`~tdatetime.now` or :meth:`~tdatetime.utcnow`. An instance of :class:`~datetime.datetime` may also be passed as a single positional argument. .. method:: tdatetime.set(*args, *kw) This will set the :class:`datetime.datetime` created from the supplied parameters as the next datetime to be returned by :meth:`~tdatetime.now` or :meth:`~tdatetime.utcnow`, clearing out any datetimes in the queue. 
An instance of :class:`~datetime.datetime` may also be passed as a single positional argument. .. method:: tdatetime.tick(*args, **kw) This method should be called either with a :class:`~datetime.timedelta` as a positional argument, or with keyword parameters that will be used to construct a :class:`~datetime.timedelta`. The :class:`~datetime.timedelta` will be used to advance the next datetime to be returned by :meth:`~tdatetime.now` or :meth:`~tdatetime.utcnow`. .. classmethod:: tdatetime.now([tz]) :param tz: An optional timezone to apply to the returned time. If supplied, it must be an instance of a :class:`~datetime.tzinfo` subclass. This will return the next supplied or calculated datetime from the internal queue, rather than the actual current datetime. If `tz` is supplied, see :ref:`timezones`. .. classmethod:: tdatetime.utcnow() This will return the next supplied or calculated datetime from the internal queue, rather than the actual current UTC datetime. If you care about timezones, see :ref:`timezones`. .. classmethod:: tdatetime.date() This will return the date component of the current mock instance, but using the date type supplied when the mock class was created. .. function:: test_time(year=2001, month=1, day=1, hour=0, minute=0, second=0, microsecond=0, tzinfo=None, delta=None, delta_type='seconds') A function that returns a mock object that can be used in place of the :class:`time.time` function but where the return value can be controlled. If a single positional argument of ``None`` is passed, then the queue of times to be returned will be empty and you will need to call :meth:`~ttime.set` or :meth:`~ttime.add` before calling the mock. If an instance of :class:`~datetime.datetime` is passed as a single positional argument, that will be used to create the first time returned. :param year: An optional year used to create the first time returned. :param month: An optional month used to create the first time. 
:param day: An optional day used to create the first time. :param hour: An optional hour used to create the first time. :param minute: An optional minute used to create the first time. :param second: An optional second used to create the first time. :param microsecond: An optional microsecond used to create the first time. :param delta: The size of the delta to use between values returned. If not specified, it will increase by 1 with each call to the mock. :param delta_type: The type of the delta to use between values returned. This can be any keyword parameter accepted by the :class:`~datetime.timedelta` constructor. The mock additionally has the following methods available on it: .. method:: ttime.add(*args, **kw) This will add the time specified by the supplied parameters to the queue of times to be returned by calls to the mock. The parameters are the same as the :class:`datetime.datetime` constructor. An instance of :class:`~datetime.datetime` may also be passed as a single positional argument. .. method:: ttime.set(*args, **kw) This will set the time specified by the supplied parameters as the next time to be returned by a call to the mock, regardless of any times in the queue. The parameters are the same as the :class:`datetime.datetime` constructor. An instance of :class:`~datetime.datetime` may also be passed as a single positional argument. .. method:: ttime.tick(*args, **kw) This method should be called either with a :class:`~datetime.timedelta` as a positional argument, or with keyword parameters that will be used to construct a :class:`~datetime.timedelta`. The :class:`~datetime.timedelta` will be used to advance the next time to be returned by a call to the mock. testfixtures.mock ~~~~~~~~~~~~~~~~~ .. automodule:: testfixtures.mock testfixtures.popen ~~~~~~~~~~~~~~~~~~ .. automodule:: testfixtures.popen :members: .. currentmodule:: testfixtures Assertions ---------- .. autoclass:: ShouldRaise :members: .. autoclass:: should_raise .. 
autofunction:: ShouldAssert .. autoclass:: ShouldWarn :members: .. autoclass:: ShouldNotWarn :members: Resources --------- .. autoclass:: TempDirectory :members: .. autofunction:: tempdir .. autofunction:: generator Helpers and Constants --------------------- .. autofunction:: diff .. autofunction:: wrap .. data:: not_there A singleton used to represent the absence of a particular attribute. Framework Helpers ----------------- Framework-specific helpers provided by testfixtures. testfixtures.components ~~~~~~~~~~~~~~~~~~~~~~~ .. automodule:: testfixtures.components :member-order: bysource :members: testfixtures.django ~~~~~~~~~~~~~~~~~~~ .. automodule:: testfixtures.django :members: .. function:: compare(x, y, prefix=None, suffix=None, raises=True, recursive=True, strict=False, comparers=None, **kw) This is identical to :func:`compare`, but with ``ignore=True`` automatically set to make comparing django :class:`~django.db.models.Model` instances easier. testfixtures.sybil ~~~~~~~~~~~~~~~~~~ .. automodule:: testfixtures.sybil :member-order: bysource :members: testfixtures.twisted ~~~~~~~~~~~~~~~~~~~~ .. automodule:: testfixtures.twisted :member-order: bysource :members: testfixtures-6.18.3/docs/changes.txt000066400000000000000000000001001412502526400174530ustar00rootroot00000000000000 .. currentmodule:: testfixtures .. include:: ../CHANGELOG.rst testfixtures-6.18.3/docs/comparing.txt000066400000000000000000000700211412502526400200330ustar00rootroot00000000000000Comparing objects and sequences =============================== .. currentmodule:: testfixtures Python's :mod:`unittest` package often fails to give very useful feedback when comparing long sequences or chunks of text. It also has trouble dealing with objects that don't natively support comparison. The functions and classes described here alleviate these problems. The compare function -------------------- The :func:`compare` function can be used as a replacement for :meth:`~unittest.TestCase.assertEqual`. 
It raises an ``AssertionError`` when its parameters are not equal, which will be reported as a test failure: >>> from testfixtures import compare >>> compare(1, 2) Traceback (most recent call last): ... AssertionError: 1 != 2 However, it allows you to specify a prefix for the message to be used in the event of failure: >>> compare(1, 2, prefix='wrong number of orders') Traceback (most recent call last): ... AssertionError: wrong number of orders: 1 != 2 This is recommended as it makes the reason for the failure more apparent without having to delve into the code or tests. You can also optionally specify a suffix, which will be appended to the message on a new line: >>> compare(1, 2, suffix='(Except for very large values of 1)') Traceback (most recent call last): ... AssertionError: 1 != 2 (Except for very large values of 1) The expected and actual value can also be explicitly supplied, making it clearer as to what has gone wrong: >>> compare(expected=1, actual=2) Traceback (most recent call last): ... AssertionError: 1 (expected) != 2 (actual) The real strengths of this function come when comparing more complex data types. A number of common python data types will give more detailed output when a comparison fails as described below: sets ~~~~ Comparing sets that aren't the same will attempt to highlight where the differences lie: >>> compare(set([1, 2]), set([2, 3])) Traceback (most recent call last): ... AssertionError: set not as expected: in first but not second: [1] in second but not first: [3] dicts ~~~~~ Comparing dictionaries that aren't the same will attempt to highlight where the differences lie: >>> compare(dict(x=1, y=2, a=4), dict(x=1, z=3, a=5)) Traceback (most recent call last): ... 
AssertionError: dict not as expected: same: ['x'] in first but not second: 'y': 2 in second but not first: 'z': 3 values differ: 'a': 4 != 5 lists and tuples ~~~~~~~~~~~~~~~~ Comparing lists or tuples that aren't the same will attempt to highlight where the differences lie: >>> compare([1, 2, 3], [1, 2, 4]) Traceback (most recent call last): ... AssertionError: sequence not as expected: same: [1, 2] first: [3] second: [4] namedtuples ~~~~~~~~~~~ When two :func:`~collections.namedtuple` instances are compared, if they are of the same type, the description given will highlight which elements were the same and which were different: >>> from collections import namedtuple >>> TestTuple = namedtuple('TestTuple', 'x y z') >>> compare(TestTuple(1, 2, 3), TestTuple(1, 4, 3)) Traceback (most recent call last): ... AssertionError: TestTuple not as expected: same: ['x', 'z'] values differ: 'y': 2 != 4 generators ~~~~~~~~~~ When two generators are compared, they are both first unwound into tuples and those tuples are then compared. The :ref:`generator ` helper is useful for creating a generator to represent the expected results: >>> from testfixtures import generator >>> def my_gen(t): ... i = 0 ... while i>> compare(generator(1, 2, 3), my_gen(2)) Traceback (most recent call last): ... AssertionError: sequence not as expected: same: (1, 2) first: (3,) second: () .. warning:: If you wish to assert that a function returns a generator, say, for performance reasons, then you should use :ref:`strict comparison `. strings and unicodes ~~~~~~~~~~~~~~~~~~~~ Comparison of strings can be tricky, particularly when those strings contain multiple lines; spotting the differences between the expected and actual values can be hard. To help with this, long strings give a more helpful representation when comparison fails: >>> compare("1234567891011", "1234567789") Traceback (most recent call last): ... 
AssertionError: '1234567891011' != '1234567789' Likewise, multi-line strings give unified diffs when their comparison fails: >>> compare(""" ... This is line 1 ... This is line 2 ... This is line 3 ... """, ... """ ... This is line 1 ... This is another line ... This is line 3 ... """) Traceback (most recent call last): ... AssertionError: --- first +++ second @@ -1,5 +1,5 @@ This is line 1 - This is line 2 + This is another line This is line 3 Such comparisons can still be confusing as white space is taken into account. If you need to care about whitespace characters, you can make spotting the differences easier as follows: >>> compare("\tline 1\r\nline 2"," line1 \nline 2", show_whitespace=True) Traceback (most recent call last): ... AssertionError: --- first +++ second @@ -1,2 +1,2 @@ -'\tline 1\r\n' +' line1 \n' 'line 2' However, you may not care about some of the whitespace involved. To help with this, :func:`compare` has two options that can be set to ignore certain types of whitespace. If you wish to compare two strings that contain blank lines or lines containing only whitespace characters, but where you only care about the content, you can use the following: .. code-block:: python compare('line1\nline2', 'line1\n \nline2\n\n', blanklines=False) If you wish to compare two strings made up of lines that may have trailing whitespace that you don't care about, you can do so with the following: .. code-block:: python compare('line1\nline2', 'line1 \t\nline2 \n', trailing_whitespace=False) .. _comparer-objects: objects ~~~~~~~ Even if your objects do not natively support comparison, when they are compared they will be considered identical if they are of the same type and have identical attributes. Take this instances of this class as an example: .. 
code-block:: python from datetime import datetime class MyObject(object): def __init__(self, name): self.name = name def __repr__(self): return '' If the attributes and type of instances are the same, they will be considered equal: >>> compare(MyObject('foo'), MyObject('foo')) However, if their attributes differ, you will get an informative error: >>> compare(MyObject('foo'), MyObject('bar')) Traceback (most recent call last): ... AssertionError: MyObject not as expected: attributes differ: 'name': 'foo' != 'bar' While comparing .name: 'foo' != 'bar' This type of comparison is also used on objects that make use of ``__slots__``. Recursive comparison ~~~~~~~~~~~~~~~~~~~~ Where :func:`compare` is able to provide a descriptive comparison for a particular type, it will then recurse to do the same for the elements contained within objects of that type. For example, when comparing a list of dictionaries, the description will not only tell you where in the list the difference occurred, but also what the differences were within the dictionaries that weren't equal: >>> compare([{'one': 1}, {'two': 2, 'text':'foo\nbar\nbaz'}], ... [{'one': 1}, {'two': 2, 'text':'foo\nbob\nbaz'}]) Traceback (most recent call last): ... AssertionError: sequence not as expected: same: [{'one': 1}] first: [{'text': 'foo\nbar\nbaz', 'two': 2}] second: [{'text': 'foo\nbob\nbaz', 'two': 2}] While comparing [1]: dict not as expected: same: ['two'] values differ: 'text': 'foo\nbar\nbaz' != 'foo\nbob\nbaz' While comparing [1]['text']: --- first +++ second @@ -1,3 +1,3 @@ foo -bar +bob baz This also applies to any comparers you have provided, as can be seen in the next section. .. _comparer-register: Providing your own comparers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When using :meth:`compare` frequently for your own complex objects, it can be beneficial to give more descriptive output when two objects don't compare as equal. .. 
note:: If you are reading this section as a result of needing to test objects that don't natively support comparison, or as a result of needing to infrequently compare your own subclasses of python basic types, take a look at :ref:`comparison-objects` as this may well be an easier solution. .. invisible-code-block: python from testfixtures.comparison import _registry, compare_sequence from testfixtures import Replacer r = Replacer() r.replace('testfixtures.comparison._registry', { list: compare_sequence }) As an example, suppose you have a class whose instances have a timestamp and a name as attributes, but you'd like to ignore the timestamp when comparing: .. code-block:: python from datetime import datetime class MyObject(object): def __init__(self, name): self.timestamp = datetime.now() self.name = name To compare lots of these, you would first write a comparer: .. code-block:: python def compare_my_object(x, y, context): if x.name == y.name: return return 'MyObject named %s != MyObject named %s' % ( context.label('x', repr(x.name)), context.label('y', repr(y.name)), ) Then you'd register that comparer for your type: .. code-block:: python from testfixtures.comparison import register register(MyObject, compare_my_object) .. invisible-code-block: python import testfixtures.comparison assert testfixtures.comparison._registry == { MyObject: compare_my_object, list: compare_sequence, } Now, it'll get used when comparing objects of that type, even if they're contained within other objects: >>> compare([1, MyObject('foo')], [1, MyObject('bar')]) Traceback (most recent call last): ... 
AssertionError: sequence not as expected: same: [1] first: [] second: [] While comparing [1]: MyObject named 'foo' != MyObject named 'bar' From this example, you can also see that a comparer can indicate that two objects are equal, for :func:`compare`'s purposes, by returning ``None``: >>> MyObject('foo') == MyObject('foo') False >>> compare(MyObject('foo'), MyObject('foo')) You can also see that you can, and should, use the context object passed in to add labels to the representations of the objects being compared if the comparison fails: >>> compare(expected=MyObject('foo'), actual=MyObject('bar')) Traceback (most recent call last): ... AssertionError: MyObject named 'foo' (expected) != MyObject named 'bar' (actual) .. invisible-code-block: python r.restore() # set up for the next test r = Replacer() r.replace('testfixtures.comparison._registry', {}) It may be that you only want to use a comparer or set of comparers for a particular test. If that's the case, you can pass :func:`compare` a ``comparers`` parameter consisting of a dictionary that maps types to comparers. These will be added to the global registry for the duration of the call: >>> compare(MyObject('foo'), MyObject('bar'), ... comparers={MyObject: compare_my_object}) Traceback (most recent call last): ... AssertionError: MyObject named 'foo' != MyObject named 'bar' .. invisible-code-block: python import testfixtures.comparison assert testfixtures.comparison._registry == {} r.restore() A full list of the available comparers included can be found below the API documentation for :func:`compare`. These make good candidates for registering for your own classes, if they provide the necessary behaviour, and their source is also good to read when wondering how to implement your own comparers. 
You may be wondering what the ``context`` object passed to the comparer is for; it allows you to hand off comparison of parts of the two objects currently being compared back to the :func:`compare` machinery, it also allows you to pass options to your comparison function. For example, you may have an object that has a couple of dictionaries as attributes: .. code-block:: python from datetime import datetime class Request(object): def __init__(self, uri, headers, body): self.uri = uri self.headers = headers self.body = body When your tests encounter instances of these that are not as expected, you want feedback about which bits of the request or response weren't as expected. This can be achieved by implementing a comparer as follows: .. code-block:: python def compare_request(x, y, context): uri_different = x.uri != y.uri headers_different = context.different(x.headers, y.headers, '.headers') body_different = context.different(x.body, y.body, '.body') if uri_different or headers_different or body_different: return 'Request for %r != Request for %r' % ( x.uri, y.uri ) .. note:: A comparer should always return some text when it considers the two objects it is comparing to be different. This comparer can either be registered globally or passed to each :func:`compare` call and will give detailed feedback about how the requests were different: >>> compare(Request('/foo', {'method': 'POST'}, {'my_field': 'value_1'}), ... Request('/foo', {'method': 'GET'}, {'my_field': 'value_2'}), ... comparers={Request: compare_request}) Traceback (most recent call last): ... 
AssertionError: Request for '/foo' != Request for '/foo' While comparing .headers: dict not as expected: values differ: 'method': 'POST' != 'GET' While comparing .headers['method']: 'POST' != 'GET' While comparing .body: dict not as expected: values differ: 'my_field': 'value_1' != 'value_2' While comparing .body['my_field']: 'value_1' != 'value_2' As an example of passing options through to a comparer, suppose you wanted to compare all decimals in a nested data structure by rounding them to a number of decimal places that varies from test to test. The comparer could be implemented and registered as follows: .. invisible-code-block: python from testfixtures.comparison import _registry r = Replacer() r.replace('testfixtures.comparison._registry', dict(_registry)) .. code-block:: python from decimal import Decimal from testfixtures.comparison import register def compare_decimal(x, y, context): precision = context.get_option('precision', 2) if round(x, precision) != round(y, precision): return '%r != %r when rounded to %i decimal places' % ( x, y, precision ) register(Decimal, compare_decimal) Now, this comparer will be used for comparing all decimals and the precision used will be that passed to :func:`compare`: >>> expected_order = {'price': Decimal('1.234'), 'quantity': 5} >>> actual_order = {'price': Decimal('1.236'), 'quantity': 5} >>> compare(expected_order, actual_order, precision=1) >>> compare(expected_order, actual_order, precision=3) Traceback (most recent call last): ... AssertionError: dict not as expected: same: ['quantity'] values differ: 'price': Decimal('1.234') != Decimal('1.236') While comparing ['price']: Decimal('1.234') != Decimal('1.236') when rounded to 3 decimal places If no precision is passed, the default of ``2`` will be used: >>> compare(Decimal('2.006'), Decimal('2.009')) >>> compare(Decimal('2.001'), Decimal('2.009')) Traceback (most recent call last): ... 
AssertionError: Decimal('2.001') != Decimal('2.009') when rounded to 2 decimal places .. invisible-code-block: python r.restore() .. _strict-comparison: Ignoring ``__eq__`` ~~~~~~~~~~~~~~~~~~~ Some objects, such as those from the Django ORM, have pretty broken implementations or ``__eq__``. Since :func:`compare` normally relies on this, it can result in objects appearing to be equal when they are not. Take this class, for example: .. code-block:: python class OrmObj(object): def __init__(self, a): self.a = a def __eq__(self, other): return True def __repr__(self): return 'OrmObj: '+str(self.a) If we compare normally, we erroneously understand the objects to be equal: >>> compare(actual=OrmObj(1), expected=OrmObj(2)) In order to get a sane comparison, we need to both supply a custom comparer as described above, and use the ``ignore_eq`` parameter: .. code-block:: python def compare_orm_obj(x, y, context): if x.a != y.a: return 'OrmObj: %s != %s' % (x.a, y.a) >>> compare(actual=OrmObj(1), expected=OrmObj(2), ... comparers={OrmObj: compare_orm_obj}, ignore_eq=True) Traceback (most recent call last): ... AssertionError: OrmObj: 2 != 1 Strict comparison ~~~~~~~~~~~~~~~~~ If is it important that the two values being compared are of exactly the same type, rather than just being equal as far as Python is concerned, then the strict mode of :func:`compare` should be used. For example, these two instances will normally appear to be equal provided the elements within them are the same: >>> TypeA = namedtuple('A', 'x') >>> TypeB = namedtuple('B', 'x') >>> compare(TypeA(1), TypeB(1)) If this type difference is important, then the `strict` parameter should be used: >>> compare(TypeA(1), TypeB(1), strict=True) Traceback (most recent call last): ... AssertionError: A(x=1) () != B(x=1) () .. 
_comparison-objects: Comparison objects ------------------ Another common problem with the checking in tests is that you may only want to make assertions about the type of an object that is nested in a data structure, or even just compare a subset of an object's attributes. TextFixtures provides the :class:`~testfixtures.Comparison` class to help in situations like these. Comparisons will appear to be equal to any object they are compared with that matches their specification. For example, take the following class: .. code-block:: python class SomeClass: def __init__(self, x, y): self.x, self.y = x, y When a comparison fails, the :class:`~testfixtures.Comparison` will not equal the object it was compared with and its representation changes to give information about what went wrong: >>> from testfixtures import Comparison as C >>> c = C(SomeClass, x=2) >>> print(repr(c)) x: 2 >>> c == SomeClass(1, 2) False >>> print(repr(c)) attributes in actual but not Comparison: 'y': 2 attributes differ: 'x': 2 (Comparison) != 1 (actual) .. note:: :meth:`~unittest.TestCase.assertEqual` has regressed in Python 3.4 and will now truncate the text shown in assertions with no way to configure this behaviour. Use :func:`compare` instead, which will give you other desirable behaviour as well as showing you the full output of failed comparisons. Types of comparison ~~~~~~~~~~~~~~~~~~~ There are several ways a comparison can be set up depending on what you want to check. 
If you only care about the class of an object, you can set up the comparison with only the class: >>> C(SomeClass) == SomeClass(1, 2) True This can also be achieved by specifying the type of the object as a dotted name: >>> import sys >>> C('types.ModuleType') == sys True Alternatively, if you happen to have an object already around, comparison can be done with it: >>> C(SomeClass(1,2)) == SomeClass(1,2) True If you only care about certain attributes, this can also easily be achieved by doing a partial comparison: >>> C(SomeClass, x=1, partial=True) == SomeClass(1, 2) True The above can be problematic if you want to compare an object with attributes that share names with parameters to the :class:`~testfixtures.Comparison` constructor. For this reason, you can pass the attributes in a dictionary: >>> compare(C(SomeClass, {'partial':3}, partial=True), SomeClass(1, 2)) Traceback (most recent call last): ... AssertionError: attributes in Comparison but not actual: 'partial': 3 != <...SomeClass...> Gotchas ~~~~~~~ - If the object being compared has an ``__eq__`` method, such as Django model instances, then the :class:`~testfixtures.Comparison` must be the first object in the equality check. The following class is an example of this: .. code-block:: python class SomeModel: def __eq__(self,other): if isinstance(other,SomeModel): return True return False It will not work correctly if used as the second object in the expression: >>> SomeModel() == C(SomeModel) False However, if the comparison is correctly placed first, then everything will behave as expected: >>> C(SomeModel)==SomeModel() True - It probably goes without saying, but comparisons should not be used on both sides of an equality check: >>> C(SomeClass) == C(SomeClass) False Mapping Comparison objects --------------------------- When comparing mappings such as :class:`dict` and :class:`~collections.OrderedDict`, you may need to check the order of the keys is as you expect. 
:class:`MappingComparison` objects can be used for this: >>> from collections import OrderedDict >>> from testfixtures import compare, MappingComparison as M >>> compare(expected=M((('a', 1), ('c', 3), ('d', 2)), ordered=True), ... actual=OrderedDict((('a', 1), ('d', 2), ('c', 3)))) Traceback (most recent call last): ... AssertionError:... wrong key order: same: ['a'] expected: ['c', 'd'] actual: ['d', 'c'] (expected) != OrderedDict([('a', 1), ('d', 2), ('c', 3)]) (actual) You may also only care about certain keys being present in a mapping. This can also be achieved with :class:`MappingComparison` objects: >>> compare(expected=M(a=1, d=2, partial=True), actual={'a': 1, 'c': 3}) Traceback (most recent call last): ... AssertionError:... ignored: ['c'] same: ['a'] in expected but not actual: 'd': 2 (expected) != {'a': 1, 'c': 3} (actual) Where there are differences, they may be hard to spot. In this case, you can ask for a more detailed explanation of what wasn't as expected: >>> compare(expected=M((('a', [1, 2]), ('d', [1, 3])), ordered=True, recursive=True), ... actual=OrderedDict((('a', [1, 2]), ('d', [1, 4])))) Traceback (most recent call last): ... AssertionError:... same: ['a'] values differ: 'd': [1, 3] (expected) != [1, 4] (actual) While comparing ['d']: sequence not as expected: same: [1] expected: [3] actual: [4] (expected) != OrderedDict([('a', [1, 2]), ('d', [1, 4])]) (actual) Round Comparison objects ------------------------- When comparing numerics you often want to be able to compare to a given precision to allow for rounding issues which make precise equality impossible. For these situations, you can use :class:`RoundComparison` objects wherever you would use floats or Decimals, and they will compare equal to any float or Decimal that matches when both sides are rounded to the specified precision. Here's an example: .. code-block:: python from testfixtures import compare, RoundComparison as R compare(R(1234.5678, 2), 1234.5681) .. 
note:: You should always pass the same type of object to the :class:`RoundComparison` object as you intend compare it with. If the type of the rounded expected value is not the same as the type of the rounded value being compared against it, a :class:`TypeError` will be raised. Range Comparison objects ------------------------- When comparing orderable types just as numbers, dates and time, you may only know what range a value will fall into. :class:`RangeComparison` objects let you confirm a value is within a certain tolerance or range. Here's an example: .. code-block:: python from testfixtures import compare, RangeComparison as R compare(R(123.456, 789), Decimal(555.01)) .. note:: :class:`RangeComparison` is inclusive of both the lower and upper bound. Sequence Comparison objects --------------------------- When comparing sequences, you may not care about the order of items in the sequence. While this type of comparison can often be achieved by pouring the sequence into a :class:`set`, this may not be possible if the items in the sequence are unhashable, or part of a nested data structure. :class:`SequenceComparison` objects can be used in this case: >>> from testfixtures import compare, SequenceComparison as S >>> compare(expected={'k': S({1}, {2}, ordered=False)}, actual={'k': [{2}, {1}]}) You may also only care about certain items being present in a sequence, but where it is important that those items are in the order you expected. This can also be achieved with :class:`SequenceComparison` objects: >>> compare(expected=S(1, 3, 5, partial=True), actual=[1, 2, 3, 4, 6]) Traceback (most recent call last): ... AssertionError:... ignored: [2, 4, 6] same: [1, 3] expected: [5] actual: [] (expected) != [1, 2, 3, 4, 6] (actual) Where there are differences, they may be hard to spot. 
In this case, you can ask for a more detailed explanation of what wasn't as expected: >>> compare(expected=S({1: 'a'}, {2: 'c'}, recursive=True), actual=[{1: 'a'}, {2: 'd'}]) Traceback (most recent call last): ... AssertionError:... same: [{1: 'a'}] expected: [{2: 'c'}] actual: [{2: 'd'}] While comparing [1]: dict not as expected: values differ: 2: 'c' (expected) != 'd' (actual) While comparing [1][2]: 'c' (expected) != 'd' (actual) (expected) != [{1: 'a'}, {2: 'd'}] (actual) There are also the :class:`Subset` and :class:`Permutation` shortcuts: >>> from testfixtures import Subset, Permutation >>> assert Subset({1}, {2}) == [{1}, {2}, {3}] >>> assert Permutation({1}, {2}) == [{2}, {1}] .. _stringcomparison: String Comparison objects ------------------------- When comparing sequences of strings, particularly those comping from things like the python logging package, you often end up wanting to express a requirement that one string should be almost like another, or maybe fit a particular regular expression. For these situations, you can use :class:`StringComparison` objects wherever you would use normal strings, and they will compare equal to any string that matches the regular expression they are created with. Here's an example: .. code-block:: python from testfixtures import compare, StringComparison as S compare(expected=S('Starting thread \d+'), actual='Starting thread 132356') If you need to specify flags, this can be done in one of three ways: - As parameters: .. code-block:: python compare(expected=S(".*BaR", dotall=True, ignorecase=True), actual="foo\nbar") - As you would to :func:`re.compile`: .. code-block:: python import re compare(expected=S(".*BaR", re.DOTALL|re.IGNORECASE), actual="foo\nbar") - Inline: .. code-block:: python compare(expected=S("(?s:.*bar)"), actual="foo\nbar") Differentiating chunks of text ------------------------------ TextFixtures provides a function that will compare two strings and give a unified diff as a result. 
This can be handy as a third parameter to :meth:`~unittest.TestCase.assertEqual` or just as a general utility function for comparing two lumps of text. As an example: >>> from testfixtures import diff >>> print(diff('line1\nline2\nline3', ... 'line1\nlineA\nline3')) --- first +++ second @@ -1,3 +1,3 @@ line1 -line2 +lineA line3 testfixtures-6.18.3/docs/components.txt000077500000000000000000000042731412502526400202520ustar00rootroot00000000000000Testing with zope.component =========================== .. invisible-code-block: python from testfixtures import Replacer r = Replacer() r.replace('testfixtures.components.TestComponents.atexit_setup', True) from zope.component import getSiteManager from testfixtures.components import TestComponents .. currentmodule:: testfixtures `zope.component`__ is a fantastic aspect-oriented library for Python, however its unit testing support is somewhat convoluted. If you need to test code that registers adapters, utilities and the like then you may need to provide a sterile component registry. For historical reasons, component registries are known as `Site Managers` in :mod:`zope.component`. __ http://pypi.python.org/pypi/zope.component Testfixtures provides the a :class:`~components.TestComponents` helper which provides just such a sterile registry. It should be instantiated in your :class:`TestCase`'s :meth:`setUp` method. It's :meth:`uninstall` method should be called in the test's :meth:`tearDown` method. Normally, :meth:`zope.component.getSiteManager` returns whatever the current registry is. 
This may be influenced by frameworks that use :mod:`zope.component` which can means that unit tests have no baseline to start with: >>> from zope.component import getSiteManager >>> original = getSiteManager() >>> print(original) Once we've got a :class:`TestComponents` in place, we know what we're getting: >>> from testfixtures.components import TestComponents >>> components = TestComponents() >>> getSiteManager() The registry that :func:`getSiteManager` returns is now also available as an attribute of the :class:`TestComponents` instance: >>> getSiteManager() is components.registry True It's also empty: >>> tuple(components.registry.registeredUtilities()) () >>> tuple(components.registry.registeredAdapters()) () >>> tuple(components.registry.registeredHandlers()) () You can do whatever you like with this registry. When you're done, just call the :meth:`uninstall` method: >>> components.uninstall() Now you'll have the original registy back in place: >>> getSiteManager() is original True .. 
invisible-code-block: python r.restore() testfixtures-6.18.3/docs/conf.py000066400000000000000000000020231412502526400166070ustar00rootroot00000000000000# -*- coding: utf-8 -*- import datetime import os import time import pkg_resources on_rtd = os.environ.get('READTHEDOCS', None) == 'True' build_date = datetime.datetime.utcfromtimestamp(int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))) extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx' ] intersphinx_mapping = { 'http://docs.python.org': None, 'http://django.readthedocs.org/en/latest/': None, 'http://sybil.readthedocs.io/en/latest/': None, } # General source_suffix = '.txt' master_doc = 'index' project = 'testfixtures' copyright = '2008-2015 Simplistix Ltd, 2016-%s Chris Withers' % build_date.year version = release = pkg_resources.get_distribution(project).version exclude_trees = ['_build'] pygments_style = 'sphinx' # Options for HTML output if on_rtd: html_theme = 'default' else: html_theme = 'classic' htmlhelp_basename = project+'doc' # Options for LaTeX output latex_documents = [ ('index', project+'.tex', project+u' Documentation', 'Simplistix Ltd', 'manual'), ] testfixtures-6.18.3/docs/conftest.py000066400000000000000000000011631412502526400175130ustar00rootroot00000000000000from doctest import REPORT_NDIFF, ELLIPSIS from sybil import Sybil from sybil.parsers.doctest import DocTestParser, FIX_BYTE_UNICODE_REPR from sybil.parsers.codeblock import CodeBlockParser from sybil.parsers.capture import parse_captures from testfixtures.compat import PY3 from testfixtures.sybil import FileParser if PY3: pytest_collect_file = Sybil( parsers=[ DocTestParser(optionflags=REPORT_NDIFF|ELLIPSIS|FIX_BYTE_UNICODE_REPR), CodeBlockParser(['print_function']), parse_captures, FileParser('tempdir'), ], pattern='*.txt', ).pytest() testfixtures-6.18.3/docs/datetime.txt000066400000000000000000000346201412502526400176550ustar00rootroot00000000000000Mocking dates and times ======================= .. 
currentmodule:: testfixtures Testing code that involves dates and times or which has behaviour dependent on the date or time it is executed at has historically been tricky. Mocking lets you perform tests on this type of code and testfixtures provides three specialised mock objects to help with this. Dates ~~~~~ The testfixtures package provides the :func:`~testfixtures.test_date` function that returns a subclass of :class:`datetime.date` with a :meth:`~datetime.date.today` method that will return a consistent sequence of dates each time it is called. This enables you to write tests for code such as the following, from the ``testfixtures.tests.sample1`` package: .. literalinclude:: ../testfixtures/tests/sample1.py :lines: 8-10,21-22 :class:`~testfixtures.Replace` can be used to apply the mock as shown in the following example, which could appear in either a unit test or a doc test: >>> from testfixtures import Replace, test_date >>> from testfixtures.tests.sample1 import str_today_1 >>> with Replace('testfixtures.tests.sample1.date', test_date()): ... str_today_1() ... str_today_1() '2001-01-01' '2001-01-02' If you need a specific date to be returned, you can specify it: >>> with Replace('testfixtures.tests.sample1.date', test_date(1978,6,13)): ... str_today_1() '1978-06-13' If you need to test with a whole sequence of specific dates, this can be done as follows: >>> with Replace('testfixtures.tests.sample1.date', test_date(None)) as d: ... d.add(1978,6,13) ... d.add(2009,11,12) ... str_today_1() ... str_today_1() '1978-06-13' '2009-11-12' Another way to test with a specific sequence of dates is to use the ``delta_type`` and ``delta`` parameters to :func:`~testfixtures.test_date`. These parameters control the type and size, respectively, of the difference between each date returned. For example, where 2 days elapse between each returned value: >>> with Replace('testfixtures.tests.sample1.date', ... test_date(1978, 6, 13, delta=2, delta_type='days')) as d: ... 
str_today_1() ... str_today_1() ... str_today_1() '1978-06-13' '1978-06-15' '1978-06-17' The ``delta_type`` can be any keyword parameter accepted by the :class:`~datetime.timedelta` constructor. Specifying a ``delta`` of zero can be an effective way of ensuring that all calls to the :meth:`~testfixtures.test_date.today` method return the same value: >>> with Replace('testfixtures.tests.sample1.date', ... test_date(1978, 6, 13, delta=0)) as d: ... str_today_1() ... str_today_1() ... str_today_1() '1978-06-13' '1978-06-13' '1978-06-13' When using :func:`~testfixtures.test_date`, you can, at any time, set the next date to be returned using the :meth:`~testfixtures.test_date.set` method. The date returned after this will be the set date plus the ``delta`` in effect: >>> with Replace('testfixtures.tests.sample1.date', test_date(delta=2)) as d: ... str_today_1() ... d.set(1978,8,1) ... str_today_1() ... str_today_1() '2001-01-01' '1978-08-01' '1978-08-03' Datetimes ~~~~~~~~~ The testfixtures package provides the :func:`~testfixtures.test_datetime` function that returns a subclass of :class:`datetime.datetime` with a :meth:`~datetime.datetime.now` method that will return a consistent sequence of :obj:`~datetime.datetime` objects each time it is called. This enables you to write tests for code such as the following, from the ``testfixtures.tests.sample1`` package: .. literalinclude:: ../testfixtures/tests/sample1.py :lines: 8-10,11-12 We use a :class:`~testfixtures.Replace` as follows, which could appear in either a unit test or a doc test: >>> from testfixtures import Replace, test_datetime >>> from testfixtures.tests.sample1 import str_now_1 >>> with Replace('testfixtures.tests.sample1.datetime', test_datetime()): ... str_now_1() ... str_now_1() '2001-01-01 00:00:00' '2001-01-01 00:00:10' If you need a specific datetime to be returned, you can specify it: >>> with Replace('testfixtures.tests.sample1.datetime', ... test_datetime(1978,6,13,1,2,3)): ... 
str_now_1() '1978-06-13 01:02:03' If you need to test with a whole sequence of specific datetimes, this can be done as follows: >>> with Replace('testfixtures.tests.sample1.datetime', ... test_datetime(None)) as d: ... d.add(1978,6,13,16,0,1) ... d.add(2009,11,12,11,41,20) ... str_now_1() ... str_now_1() '1978-06-13 16:00:01' '2009-11-12 11:41:20' Another way to test with a specific sequence of datetimes is to use the ``delta_type`` and ``delta`` parameters to :func:`~testfixtures.test_datetime`. These parameters control the type and size, respectively, of the difference between each datetime returned. For example, where 2 hours elapse between each returned value: >>> with Replace( ... 'testfixtures.tests.sample1.datetime', ... test_datetime(1978, 6, 13, 16, 0, 1, delta=2, delta_type='hours') ... ) as d: ... str_now_1() ... str_now_1() ... str_now_1() '1978-06-13 16:00:01' '1978-06-13 18:00:01' '1978-06-13 20:00:01' The ``delta_type`` can be any keyword parameter accepted by the :class:`~datetime.timedelta` constructor. Specifying a ``delta`` of zero can be an effective way of ensuring that all calls to the :meth:`~testfixtures.test_datetime.now` method return the same value: >>> with Replace('testfixtures.tests.sample1.datetime', ... test_datetime(1978, 6, 13, 16, 0, 1, delta=0)) as d: ... str_now_1() ... str_now_1() ... str_now_1() '1978-06-13 16:00:01' '1978-06-13 16:00:01' '1978-06-13 16:00:01' When using :func:`~testfixtures.test_datetime`, you can, at any time, set the next datetime to be returned using the :meth:`~testfixtures.test_datetime.set` method. The value returned after this will be the set value plus the ``delta`` in effect: >>> with Replace('testfixtures.tests.sample1.datetime', ... test_datetime(delta=2)) as d: ... str_now_1() ... d.set(1978,8,1) ... str_now_1() ... str_now_1() '2001-01-01 00:00:00' '1978-08-01 00:00:00' '1978-08-01 00:00:02' .. 
_timezones: Timezones --------- For the examples in this section, we need to have a timezone to work with: .. code-block:: python from datetime import tzinfo, timedelta class ATZInfo(tzinfo): def tzname(self, dt): return 'A TimeZone' def utcoffset(self, dt): # In general, this timezone is 5 hours behind UTC offset = timedelta(hours=-5) return offset+self.dst(dt) def dst(self, dt): # However, between March and September, it is only # 4 hours behind UTC if 3 < dt.month < 9: return timedelta(hours=1) return timedelta() By default, the internal queue of datetimes in a :func:`~testfixtures.test_datetime` simulates local time in the UTC timezone: >>> datetime = test_datetime(delta=0) This means we get the following when the simulated date is 1st Jan 2001: >>> datetime.set(2001, 1, 1, 10, 0) >>> datetime.now() datetime.datetime(2001, 1, 1, 10, 0) >>> datetime.utcnow() datetime.datetime(2001, 1, 1, 10, 0) >>> datetime.now(ATZInfo()) datetime.datetime(2001, 1, 1, 5, 0, tzinfo=) We get the following when the simulated date is 1st Apr 2001: >>> datetime.set(2001, 4, 1, 10, 0) >>> datetime.now() datetime.datetime(2001, 4, 1, 10, 0) >>> datetime.utcnow() datetime.datetime(2001, 4, 1, 10, 0) >>> datetime.now(ATZInfo()) datetime.datetime(2001, 4, 1, 6, 0, tzinfo=) If you wish to simulate a different local time, you should pass its :class:`datetime.tzinfo` to the :func:`~testfixtures.test_datetime` constructor: >>> datetime = test_datetime(delta=0, tzinfo=ATZInfo()) This means we get the following when the simulated date is 1st Jan 2001: >>> datetime.set(2001, 1, 1, 10, 0) >>> datetime.now() datetime.datetime(2001, 1, 1, 10, 0) >>> datetime.utcnow() datetime.datetime(2001, 1, 1, 15, 0) >>> datetime.now(ATZInfo()) datetime.datetime(2001, 1, 1, 10, 0, tzinfo=) We get the following when the simulated date is 1st Apr 2001: >>> datetime.set(2001, 4, 1, 10, 0) >>> datetime.now() datetime.datetime(2001, 4, 1, 10, 0) >>> datetime.utcnow() datetime.datetime(2001, 4, 1, 14, 0) >>> 
datetime.now(ATZInfo()) datetime.datetime(2001, 4, 1, 10, 0, tzinfo=) .. warning:: For your own sanity, you should avoid using the ``tzinfo`` parameter or passing :class:`~datetime.datetime` instances with non-``None`` :attr:`~datetime.datetime.tzinfo` attributes when calling :meth:`~tdatetime.add` or :meth:`~tdatetime.set`. Times ~~~~~ The testfixtures package provides the :func:`~testfixtures.test_time` function that, when called, returns a replacement for the :func:`time.time` function. This enables you to write tests for code such as the following, from the ``testfixtures.tests.sample1`` package: .. literalinclude:: ../testfixtures/tests/sample1.py :lines: 30-34 We use a :class:`~testfixtures.Replace` as follows, which could appear in either a unit test or a doc test: >>> from testfixtures import Replace, test_time >>> from testfixtures.tests.sample1 import str_time >>> with Replace('testfixtures.tests.sample1.time', test_time()): ... str_time() ... str_time() '978307200.0' '978307201.0' If you need an integer representing a specific time to be returned, you can specify it: >>> with Replace('testfixtures.tests.sample1.time', ... test_time(1978, 6, 13, 1, 2, 3)): ... str_time() '266547723.0' If you need to test with a whole sequence of specific timestamps, this can be done as follows: >>> with Replace('testfixtures.tests.sample1.time', test_time(None)) as t: ... t.add(1978,6,13,16,0,1) ... t.add(2009,11,12,11,41,20) ... str_time() ... str_time() '266601601.0' '1258026080.0' Another way to test with a specific sequence of timestamps is to use the ``delta_type`` and ``delta`` parameters to :func:`~testfixtures.test_time`. These parameters control the type and size, respectively, of the difference between each timestamp returned. For example, where 2 hours elapse between each returned value: >>> with Replace( ... 'testfixtures.tests.sample1.time', ... test_time(1978, 6, 13, 16, 0, 1, delta=2, delta_type='hours') ... ) as d: ... str_time() ... str_time() ... 
str_time() '266601601.0' '266608801.0' '266616001.0' The ``delta_type`` can be any keyword parameter accepted by the :class:`~datetime.timedelta` constructor. Specifying a ``delta`` of zero can be an effective way of ensuring that all calls to the :meth:`~time.time` function return the same value: >>> with Replace('testfixtures.tests.sample1.time', ... test_time(1978, 6, 13, 16, 0, 1, delta=0)) as d: ... str_time() ... str_time() ... str_time() '266601601.0' '266601601.0' '266601601.0' When using :func:`~testfixtures.test_time`, you can, at any time, set the next timestamp to be returned using the :meth:`~testfixtures.test_time.set` method. The value returned after this will be the set value plus the ``delta`` in effect: >>> with Replace('testfixtures.tests.sample1.time', test_time(delta=2)) as d: ... str_time() ... d.set(1978,8,1) ... str_time() ... str_time() '978307200.0' '270777600.0' '270777602.0' Gotchas with dates and times ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Using these specialised mock objects can have some intricacies as described below: Local references to functions ----------------------------- There are situations where people may have obtained a local reference to the :meth:`~datetime.date.today` or :meth:`~datetime.datetime.now` methods, such as the following code from the ``testfixtures.tests.sample1`` package: .. literalinclude:: ../testfixtures/tests/sample1.py :lines: 8-10,14-18,24-28 In these cases, you need to be careful with the replacement: >>> from testfixtures import Replacer, test_datetime >>> from testfixtures.tests.sample1 import str_now_2, str_today_2 >>> with Replacer() as replace: ... today = replace('testfixtures.tests.sample1.today', test_date().today) ... now = replace('testfixtures.tests.sample1.now', test_datetime().now) ... str_today_2() ... str_now_2() '2001-01-01' '2001-01-01 00:00:00' .. 
_strict-dates-and-times: Use with code that checks class types ------------------------------------- When using the above specialist mocks, you may find code that checks the type of parameters passed may get confused. This is because, by default, :class:`test_datetime` and :class:`test_date` return instances of the real :class:`~datetime.datetime` and :class:`~datetime.date` classes: >>> from testfixtures import test_datetime >>> from datetime import datetime >>> tdatetime = test_datetime() >>> issubclass(tdatetime, datetime) True >>> tdatetime.now().__class__ <...'datetime.datetime'> The above behaviour, however, is generally what you want as other code in your application and, more importantly, in other code such as database adapters, may handle instances of the real :class:`~datetime.datetime` and :class:`~datetime.date` classes, but not instances of the :class:`test_datetime` and :class:`test_date` mocks. That said, this behaviour can cause problems if you check the type of an instance against one of the mock classes. Most people might expect the following to return ``True``: >>> isinstance(tdatetime(2011, 1, 1), tdatetime) False >>> isinstance(tdatetime.now(), tdatetime) False If this causes a problem for you, then both :class:`test_datetime` and :class:`test_date` take a `strict` keyword parameter that can be used as follows: >>> tdatetime = test_datetime(strict=True) >>> tdatetime.now().__class__ <class 'testfixtures.tdatetime'> >>> isinstance(tdatetime.now(), tdatetime) True You will need to take care that you have replaced occurrences of the class where type checking is done with the correct :class:`test_datetime` or :class:`test_date`. Also, be aware that the :meth:`~tdatetime.date` method of :class:`test_datetime` instances will still return a normal :class:`~datetime.date` instance. If type checking related to this is causing problems, the type the :meth:`~tdatetime.date` method returns can be controlled as shown in the following example: ..
code-block:: python from testfixtures import test_date, test_datetime date_type = test_date(strict=True) datetime_type = test_datetime(strict=True, date_type=date_type) With things set up like this, the :meth:`~tdatetime.date` method will return an instance of the :class:`date_type` mock: >>> somewhen = datetime_type.now() >>> somewhen.date() tdate(2001, 1, 1) >>> _.__class__ is date_type True testfixtures-6.18.3/docs/development.txt000066400000000000000000000025121412502526400203760ustar00rootroot00000000000000Development =========== .. highlight:: bash If you wish to contribute to this project, then you should fork the repository found here: https://github.com/Simplistix/testfixtures/ Once that has been done and you have a checkout, you can follow these instructions to perform various development tasks: Setting up a virtualenv ----------------------- The recommended way to set up a development environment is to turn your checkout into a virtualenv and then install the package in editable form as follows:: $ virtualenv . $ bin/pip install -U -e .[test,build] Running the tests ----------------- Once you've set up a virtualenv, the tests can be run as follows:: $ source bin/activate $ pytest Building the documentation -------------------------- The Sphinx documentation is built by doing the following from the directory containing setup.py:: $ source bin/activate $ cd docs $ make html To check that the description that will be used on PyPI renders properly, do the following:: $ python setup.py --long-description | rst2html.py > desc.html The resulting ``desc.html`` should be checked by opening in a browser. Making a release ---------------- To make a release, just update ``versions.txt``, update the change log and push to https://github.com/Simplistix/testfixtures and Carthorse should take care of the rest. 
testfixtures-6.18.3/docs/django.txt000077500000000000000000000050141412502526400173210ustar00rootroot00000000000000Testing when using django ========================= Django's ORM has an unfortunate implementation choice to consider :class:`~django.db.models.Model` instances to be identical as long as their primary keys are the same: >>> from testfixtures.tests.test_django.models import SampleModel >>> SampleModel(id=1, value=1) == SampleModel(id=1, value=2) True To work around this, :mod:`testfixtures.django` :ref:`registers ` a :func:`comparer ` for the django :class:`~django.db.models.Model` class. However, for this to work, ``ignore_eq=True`` must be passed: >>> from testfixtures import compare >>> import testfixtures.django # to register the comparer... >>> compare(SampleModel(id=1, value=1), SampleModel(id=1, value=2), ... ignore_eq=True) Traceback (most recent call last): ... AssertionError: SampleModel not as expected: same: [u'id'] values differ: 'value': 1 != 2 Since the above can quickly become cumbersome, a django-specific version of :func:`~testfixtures.compare`, with ignoring ``__eq__`` built in, is provided: >>> from testfixtures.django import compare as django_compare >>> django_compare(SampleModel(id=1, value=1), SampleModel(id=1, value=2)) Traceback (most recent call last): ... AssertionError: SampleModel not as expected: same: [u'id'] values differ: 'value': 1 != 2 It may also be that you want to ignore fields over which you have no control and cannot easily mock, such as created or modified times. For this, you can use the `ignore_fields` option: >>> compare(SampleModel(id=1, value=1), SampleModel(id=1, value=2), ... ignore_eq=True, ignore_fields=['value']) .. note:: The implementation of the comparer for :class:`~django.db.models.Model` instances ignores fields that have ``editable`` set to ``False``. 
By default, non-editable fields are ignored: >>> django_compare(SampleModel(not_editable=1), SampleModel(not_editable=2)) If you wish to include these fields in the comparison, pass the ``non_editable_fields`` option: >>> django_compare(SampleModel(not_editable=1), SampleModel(not_editable=2), ... non_editable_fields=True) Traceback (most recent call last): ... AssertionError: SampleModel not as expected: same: ['created', u'id', 'value'] values differ: 'not_editable': 1 != 2 .. note:: The registered comparer currently ignores :class:`many to many ` fields. Patches to fix this deficiency are welcome! testfixtures-6.18.3/docs/exceptions.txt000066400000000000000000000107161412502526400202420ustar00rootroot00000000000000Testing exceptions ================== .. currentmodule:: testfixtures The :mod:`unittest` support for asserting that exceptions are raised when expected is fairly weak. Like many other Python testing libraries, testfixtures has tools to help with this. The :class:`ShouldRaise` context manager ---------------------------------------- If you are using a version of Python where the :keyword:`with` statement can be used, it's recommended that you use the :class:`ShouldRaise` context manager. Suppose we wanted to test the following function to make sure that the right exception was raised: .. code-block:: python def the_thrower(throw=True): if throw: raise ValueError('Not good!') The following example shows how to test that the correct exception is raised: >>> from testfixtures import ShouldRaise >>> with ShouldRaise(ValueError('Not good!')): ... the_thrower() If the exception raised doesn't match the one expected, :class:`ShouldRaise` will raise an :class:`AssertionError` causing the tests in which it occurs to fail: >>> with ShouldRaise(ValueError('Is good!')): ... the_thrower() Traceback (most recent call last): ... AssertionError: ValueError('Is good!'...) (expected) != ValueError('Not good!'...) 
(raised) If you're not concerned about anything more than the type of the exception that's raised, you can check as follows: >>> from testfixtures import ShouldRaise >>> with ShouldRaise(ValueError): ... the_thrower() If you're feeling slack and just want to check that an exception is raised, but don't care about the type of that exception, the following will suffice: >>> from testfixtures import ShouldRaise >>> with ShouldRaise(): ... the_thrower() If no exception is raised by the code under test, :class:`ShouldRaise` will raise an :class:`AssertionError` to indicate this: >>> from testfixtures import ShouldRaise >>> with ShouldRaise(): ... the_thrower(throw=False) Traceback (most recent call last): ... AssertionError: No exception raised! :class:`ShouldRaise` has been implemented such that it can be successfully used to test if code raises both :class:`SystemExit` and :class:`KeyboardInterrupt` exceptions. To help with :class:`SystemExit` and other exceptions that are tricky to construct yourself, :class:`ShouldRaise` instances have a :attr:`~ShouldRaise.raised` attribute. This will contain the actual exception raised and can be used to inspect parts of it: >>> import sys >>> from testfixtures import ShouldRaise >>> with ShouldRaise() as s: ... sys.exit(42) >>> s.raised.code 42 The :func:`should_raise` decorator ----------------------------------------- If you are working in a traditional :mod:`unittest` environment and want to check that a particular test function raises an exception, you may find the decorator suits your needs better: .. code-block:: python from testfixtures import should_raise @should_raise(ValueError('Not good!')) def test_function(): the_thrower() This decorator behaves exactly as the :class:`ShouldRaise` context manager described in the documentation above. .. note:: It is slightly recommended that you use the context manager rather than the decorator in most cases. 
With the decorator, all exceptions raised within the decorated function will be checked, which can hinder test development. With the context manager, you can make assertions about only the exact lines of code that you expect to raise the exception. Exceptions that are conditionally raised ---------------------------------------- Some exceptions are only raised in certain versions of Python. For example, in Python 2, ``bytes()`` will turn both bytes and strings into bytes, while in Python 3, it will raise an exception when presented with a string. If you wish to make assertions that this behaviour is expected, you can use the ``unless`` option to :class:`ShouldRaise` as follows: .. code-block:: python import sys from testfixtures import ShouldRaise PY2 = sys.version_info[:2] < (3, 0) with ShouldRaise(TypeError, unless=PY2): bytes('something') .. note:: Do **not** abuse this functionality to make sloppy assertions. It is always better to have two different tests that cover a case when an exception should be raised and a case where an exception should not be raised rather than using the above functionality. It is *only* provided to help in cases where something in the environment that cannot be mocked out or controlled influences whether or not an exception is raised. testfixtures-6.18.3/docs/files.txt000066400000000000000000000461351412502526400171660ustar00rootroot00000000000000Testing with files and directories ================================== .. currentmodule:: testfixtures Working with files and directories in tests can often require excessive amounts of boilerplate code to make sure that the tests happen in their own sandbox, files and directories contain what they should or code processes test files correctly, and the sandbox is cleared up at the end of the tests. Methods of use -------------- To help with this, testfixtures provides the :class:`TempDirectory` class that hides most of the boilerplate code you would need to write.
Suppose you wanted to test the following function: .. code-block:: python import os def foo2bar(dirpath, filename): path = os.path.join(dirpath, filename) with open(path, 'rb') as input: data = input.read() data = data.replace(b'foo', b'bar') with open(path, 'wb') as output: output.write(data) There are several different ways depending on the type of test you are writing: The context manager ~~~~~~~~~~~~~~~~~~~ If you're using a version of Python where the ``with`` keyword is available, a :class:`TempDirectory` can be used as a context manager: >>> from testfixtures import TempDirectory >>> with TempDirectory() as d: ... d.write('test.txt', b'some foo thing') ... foo2bar(d.path, 'test.txt') ... d.read('test.txt') '...' b'some bar thing' The decorator ~~~~~~~~~~~~~ If you are working in a traditional :mod:`unittest` environment and only work with files or directories in a particular test function, you may find the decorator suits your needs better: .. code-block:: python from testfixtures import tempdir, compare @tempdir() def test_function(dir): dir.write('test.txt', b'some foo thing') foo2bar(dir.path, 'test.txt') compare(dir.read('test.txt'), b'some bar thing') .. check the above raises no assertion error: >>> test_function() .. note:: This method is not compatible with pytest's fixture discovery stuff. Instead, put a fixture such as the following in your `conftest.py`: .. code-block:: python from testfixtures import TempDirectory import pytest @pytest.fixture() def dir(): with TempDirectory() as dir: yield dir Manual usage ~~~~~~~~~~~~ If you want to work with files or directories for the duration of a doctest or in every test in a :class:`~unittest.TestCase`, then you can use the :class:`TempDirectory` manually. 
The instantiation and replacement are done in the ``setUp`` function of the :class:`~unittest.TestCase` or passed to the :class:`~doctest.DocTestSuite` constructor: >>> from testfixtures import TempDirectory >>> d = TempDirectory() You can then use the temporary directory for your testing: >>> d.write('test.txt', b'some foo thing') '...' >>> foo2bar(d.path, 'test.txt') >>> d.read('test.txt') == b'some bar thing' True Then, in the ``tearDown`` function of the :class:`~unittest.TestCase` or passed to the :class:`~doctest.DocTestSuite` constructor, you should make sure the temporary directory is cleaned up: >>> d.cleanup() If you have multiple :class:`TempDirectory` objects in use, you can easily clean them all up: >>> TempDirectory.cleanup_all() Features of a temporary directory --------------------------------- No matter which usage pattern you pick, you will always end up with a :class:`TempDirectory` object. These have an array of methods that let you perform common file and directory related tasks without all the manual boiler plate. The following sections show you how to perform the various tasks you're likely to bump into in the course of testing. .. create a tempdir for the examples: >>> tempdir = TempDirectory() Computing paths ~~~~~~~~~~~~~~~ If you need to know the real path of the temporary directory, the :class:`TempDirectory` object has a :attr:`~TempDirectory.path` attribute: >>> tempdir.path '...tmp...' A common use case is to want to compute a path within the temporary directory to pass to code under test. This can be done with the :meth:`~TempDirectory.getpath` method: >>> tempdir.getpath('foo').rsplit(os.sep,1)[-1] 'foo' If you want to compute a deeper path, you can either pass either a tuple or a forward slash-separated path: >>> tempdir.getpath(('foo', 'baz')).rsplit(os.sep, 2)[-2:] ['foo', 'baz'] >>> tempdir.getpath('foo/baz') .rsplit(os.sep, 2)[-2:] ['foo', 'baz'] .. 
note:: If passing a string containing path separators, a forward slash should be used as the separator regardless of the underlying platform separator. Writing files ~~~~~~~~~~~~~ To write to a file in the root of the temporary directory, you pass the name of the file and the content you want to write: >>> tempdir.write('myfile.txt', b'some text') '...' >>> with open(os.path.join(tempdir.path, 'myfile.txt')) as f: ... print(f.read()) some text The full path of the newly written file is returned: >>> path = tempdir.write('anotherfile.txt', b'some more text') >>> with open(path) as f: ... print(f.read()) some more text You can also write files into a sub-directory of the temporary directory, whether or not that directory exists, as follows: >>> path = tempdir.write(('some', 'folder', 'afile.txt'), b'the text') >>> with open(path) as f: ... print(f.read()) the text You can also specify the path to write to as a forward-slash separated string: >>> path = tempdir.write('some/folder/bfile.txt', b'the text') >>> with open(path) as f: ... print(f.read()) the text .. note:: Forward slashes should be used regardless of the file system or operating system in use. Creating directories ~~~~~~~~~~~~~~~~~~~~ If you just want to create a sub-directory in the temporary directory you can do so as follows: .. new tempdir: >>> tempdir = TempDirectory() >>> tempdir.makedir('output') '...' 
>>> os.path.isdir(os.path.join(tempdir.path, 'output')) True As with file creation, the full path of the sub-directory that has just been created is returned: >>> path = tempdir.makedir('more_output') >>> os.path.isdir(path) True Finally, you can create a nested sub-directory even if the intervening parent directories do not exist: >>> os.path.exists(os.path.join(tempdir.path, 'some')) False >>> path = tempdir.makedir(('some', 'sub', 'dir')) >>> os.path.exists(path) True You can also specify the path to write to as a forward-slash separated string: >>> os.path.exists(os.path.join(tempdir.path, 'another')) False >>> path = tempdir.makedir('another/sub/dir') >>> os.path.exists(path) True .. note:: Forward slashes should be used regardless of the file system or operating system in use. Checking the contents of files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Once a file has been written into the temporary directory, you will often want to check its contents. This is done with the :meth:`TempDirectory.read` method. Suppose the code you are testing creates some files: .. new tempdir: >>> tempdir = TempDirectory() .. code-block:: python def spew(path): with open(os.path.join(path, 'root.txt'), 'wb') as f: f.write(b'root output') os.mkdir(os.path.join(path, 'subdir')) with open(os.path.join(path, 'subdir', 'file.txt'), 'wb') as f: f.write(b'subdir output') os.mkdir(os.path.join(path, 'subdir', 'logs')) We can test this function by passing it the temporary directory's path and then using the :meth:`TempDirectory.read` method to check the files were created with the correct content: >>> spew(tempdir.path) >>> tempdir.read('root.txt') b'root output' >>> tempdir.read(('subdir', 'file.txt')) b'subdir output' The second part of the above test shows how to use the :meth:`TempDirectory.read` method to check the contents of files that are in sub-directories of the temporary directory. 
This can also be done by specifying the path relative to the root of the temporary directory as a forward-slash separated string: >>> tempdir.read('subdir/file.txt') b'subdir output' .. note:: Forward slashes should be used regardless of the file system or operating system in use. Checking the contents of directories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's good practice to test that your code is only writing files you expect it to and to check they are being written to the path you expect. :meth:`TempDirectory.compare` is the method to use to do this. As an example, we could check that the :func:`spew` function above created no extraneous files as follows: >>> tempdir.compare([ ... 'root.txt', ... 'subdir/', ... 'subdir/file.txt', ... 'subdir/logs/', ... ]) If we only wanted to check the sub-directory, we would specify the path to start from, relative to the root of the temporary directory: >>> tempdir.compare([ ... 'file.txt', ... 'logs/', ... ], path='subdir') If, like git, we only cared about files, we could do the comparison as follows: >>> tempdir.compare([ ... 'root.txt', ... 'subdir/file.txt', ... ], files_only=True) And finally, if we only cared about files at a particular level, we could turn off the recursive comparison as follows: >>> tempdir.compare([ ... 'root.txt', ... 'subdir', ... ], recursive=False) The :meth:`~testfixtures.TempDirectory.compare` method can also be used to check whether a directory contains nothing, for example: >>> tempdir.compare(path=('subdir', 'logs'), expected=()) The above can also be done by specifying the sub-directory to be checked as a forward-slash separated path: >>> tempdir.compare(path='subdir/logs', expected=()) If the actual directory contents do not match the expected contents passed in, an :class:`~exceptions.AssertionError` is raised, which will show up as a unit test failure: >>> tempdir.compare(['subdir'], recursive=False) Traceback (most recent call last): ... 
AssertionError: sequence not as expected: same: () expected: ('subdir',) actual: ('root.txt', 'subdir') In some circumstances, you may want to ignore certain files or sub-directories when checking contents. To make this easy, the :class:`~testfixtures.TempDirectory` constructor takes an optional `ignore` parameter which, if provided, should contain a sequence of regular expressions. If any of the regular expressions return a match when used to search through the results of any of the the methods covered in this section, that result will be ignored. For example, suppose we are testing some revision control code, but don't really care about the revision control system's metadata directories, which may or may not be present: .. code-block:: python from random import choice def svn_ish(dirpath, filename): if choice((True, False)): os.mkdir(os.path.join(dirpath, '.svn')) with open(os.path.join(dirpath, filename), 'wb') as f: f.write(b'something') To test this, we can use any of the previously described methods. When used manually or as a context manager, this would be as follows: >>> with TempDirectory(ignore=['.svn']) as d: ... svn_ish(d.path, 'test.txt') ... d.compare(['test.txt']) The decorator would be as follows: .. code-block:: python from testfixtures import tempdir, compare @tempdir(ignore=['.svn']) def test_function(d): svn_ish(d.path, 'test.txt') d.compare(['test.txt']) .. check the above raises no assertion error: >>> test_function() .. set things up again: >>> tempdir = TempDirectory() >>> spew(tempdir.path) If you are working with doctests, the :meth:`~testfixtures.TempDirectory.listdir` method can be used instead: >>> tempdir.listdir() root.txt subdir >>> tempdir.listdir('subdir') file.txt logs >>> tempdir.listdir(('subdir', 'logs')) No files or directories found. The above example also shows how to check the contents of sub-directories of the temporary directory and also shows what is printed when a directory contains nothing. 
The :meth:`~testfixtures.TempDirectory.listdir` method can also take a path separated by forward slashes, which can make doctests a little more readable. The above test could be written as follows: >>> tempdir.listdir('subdir/logs') No files or directories found. However, if you have a nested folder structure, such as that created by our :func:`spew` function, it can be easier to just inspect the whole tree of files and folders created. You can do this by using the `recursive` parameter to :meth:`~testfixtures.TempDirectory.listdir`: >>> tempdir.listdir(recursive=True) root.txt subdir/ subdir/file.txt subdir/logs/ Bytes versus Strings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. new tempdir: >>> tempdir = TempDirectory() You'll notice that all of the examples so far have used raw bytes as their data and written to and read from files only in binary mode. This keeps all the examples nice and simple and working consistently between Python 2 and Python 3. One of the big changes between Python 2 and Python 3 was that the default string type became unicode instead of binary, and a new type for bytes was introduced. This little snippet shows the difference by defining two constants for the British Pound symbol: .. code-block:: python import sys PY3 = sys.version_info[:2] >= (3, 0) if PY3: some_bytes = '\xa3'.encode('utf-8') some_text = '\xa3' else: some_bytes = '\xc2\xa3' some_text = '\xc2\xa3'.decode('utf-8') Python 3 is much stricter than Python 2 about the byte versus string boundary and :class:`TempDirectory` has been changed to help work with this by only reading and writing files in binary mode and providing parameters to control decoding and encoding when you want to read and write text. For example, when writing, you can either write bytes directly, as we have been in the examples so far: >>> path = tempdir.write('currencies.txt', some_bytes) >>> with open(path, 'rb') as currencies: ... 
currencies.read() b'\xc2\xa3' Or, you can write text, but must specify an encoding to use when writing the data to the file: >>> path = tempdir.write('currencies.txt', some_text, 'utf-8') >>> with open(path, 'rb') as currencies: ... currencies.read() b'\xc2\xa3' The same is true when reading files. You can either read bytes: >>> tempdir.read('currencies.txt') == some_bytes True Or, you can read text, but must specify an encoding that will be used to decode the data in the file: >>> tempdir.read('currencies.txt', 'utf-8') == some_text True Working with an existing sandbox -------------------------------- Some testing infrastructure already provides a sandbox temporary directory, however that infrastructure might not provide the same level of functionality that :class:`~testfixtures.TempDirectory` provides. For this reason, it is possible to wrap an existing directory such as the following with a :class:`~testfixtures.TempDirectory`: >>> from tempfile import mkdtemp >>> thedir = mkdtemp() When working with the context manager, this is done as follows: >>> with TempDirectory(path=thedir) as d: ... d.write('file', b'data') ... d.makedir('directory') ... sorted(os.listdir(thedir)) '...' '...' ['directory', 'file'] .. check thedir still exists and reset >>> from shutil import rmtree >>> os.path.exists(thedir) True >>> rmtree(thedir) >>> thedir = mkdtemp() For the decorator, usage would be as follows: .. code-block:: python from testfixtures import tempdir, compare @tempdir(path=thedir) def test_function(d): d.write('file', b'data') d.makedir('directory') assert sorted(os.listdir(thedir))==['directory', 'file'] .. check the above raises no assertion error and that thedir still exits: >>> test_function() >>> os.path.exists(thedir) True It is important to note that if an existing directory is used, it will not be deleted by either the decorator or the context manager. You will need to make sure that the directory is cleaned up as required. .. 
check the above statement is true: >>> os.path.exists(thedir) True .. better clean it up: >>> rmtree(thedir) Using with Sybil ----------------- `Sybil`__ is a tool for testing the examples found in documentation. It works by applying a set of specialised parsers to the documentation and testing or otherwise using the examples returned by those parsers. __ http://sybil.readthedocs.io The key differences between testing with Sybil and traditional doctests are that it is possible to plug in different types of parser, not just the "python console session" one, and so it is possible to test different types of examples. testfixtures provides one these parsers to aid working with :class:`~testfixtures.TempDirectory` objects. This parser makes use of :rst:dir:`topic` directives with specific classes set to perform different actions. The following sections describe how to use this parser to help with writing temporary files and checking their contents. Setting up ~~~~~~~~~~ To use the Sybil parser, you need to make sure a :class:`TempDirectory` instance is available under a particular name in the sybil test namespace. This name is then passed to the parser's constructor and the parser is passed to the :class:`~sybil.Sybil` constructor. The following example shows how to use Sybil's `pytest`__ integration to execute all of the examples below. These require not only the testfixtures parser but also the Sybil parsers that give more traditional doctest behaviour, invisible code blocks that are useful for setting things up and checking examples without breaking up the flow of the documentation, and capturing of examples from the documentation to use for use in other forms of testing: __ https://docs.pytest.org/en/latest/ .. literalinclude:: ../testfixtures/tests/conftest.py Writing files ~~~~~~~~~~~~~ To write a file, a :rst:dir:`topic` with a class of ``write-file`` is included in the documentation. 
The following example is a complete reStructuredText file that shows how to write a file that is then used by a later example: .. literalinclude:: ../testfixtures/tests/configparser-read.txt Checking the contents of files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To read a file, a :rst:dir:`topic` with a class of ``read-file`` is included in the documentation. The following example is a complete reStructuredText file that shows how to check the values written by the code being documented while also using this check as part of the documentation: .. literalinclude:: ../testfixtures/tests/configparser-write.txt Checking the contents of directories ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ While :class:`~testfixtures.sybil.FileParser` itself does not offer any facility for checking the contents of directories, Sybil's :func:`~sybil.parsers.capture.parse_captures` can be used in conjunction with the existing features of a :class:`TempDirectory` to illustrate the contents expected in a directory seamlessly within the documentation. Here's a complete reStructuredText document that illustrates this technique: .. literalinclude:: ../testfixtures/tests/directory-contents.txt .. clean up all tempdirs: >>> TempDirectory.cleanup_all() A note on encoding and line endings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As currently implemented, the parser provided by testfixtures only works with textual file content that can be encoded using the ASCII character set. This content will always be written with ``'\n'`` line separators and, when read, will always have its line endings normalised to ``'\n'``. If you hit any limitations caused by this, please raise an issue in the tracker on GitHub. testfixtures-6.18.3/docs/index.txt000066400000000000000000000013531412502526400171650ustar00rootroot00000000000000.. include:: ../README.rst The sections below describe the use of the various tools included: .. 
toctree:: :maxdepth: 1 comparing.txt mocking.txt datetime.txt logging.txt streams.txt files.txt exceptions.txt warnings.txt popen.txt django.txt components.txt utilities.txt If you're looking for a description of a particular tool, please see the API reference: .. toctree:: :maxdepth: 1 api.txt For details of how to install the package or get involved in its development, please see the sections below: .. toctree:: :maxdepth: 1 installation.txt development.txt changes.txt license.txt Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` testfixtures-6.18.3/docs/installation.txt000066400000000000000000000013651412502526400205620ustar00rootroot00000000000000Installation Instructions ========================= If you want to experiment with testfixtures, the easiest way to install it is to do the following in a virtualenv:: pip install testfixtures If your package uses setuptools and you decide to use testfixtures, then you should do one of the following: - Specify ``testfixtures`` in the ``tests_require`` parameter of your package's call to ``setup`` in :file:`setup.py`. - Add an ``extra_requires`` parameter in your call to ``setup`` as follows: .. invisible-code-block: python from testfixtures.mock import Mock setup = Mock() .. code-block:: python setup( # other stuff here extras_require=dict( test=['testfixtures'], ) ) testfixtures-6.18.3/docs/license.txt000066400000000000000000000000741412502526400174770ustar00rootroot00000000000000======= License ======= .. literalinclude:: ../LICENSE.txt testfixtures-6.18.3/docs/logging.txt000066400000000000000000000323261412502526400175100ustar00rootroot00000000000000Testing logging =============== .. currentmodule:: testfixtures Python includes a :mod:`logging` package, and while it is widely used, many people assume that logging calls do not need to be tested or find the prospect too daunting. 
To help with this, testfixtures allows you to easily capture the output of calls to Python's logging framework and make sure they were as expected. .. note:: The :class:`LogCapture` class is useful for checking that your code logs the right messages. If you want to check that the configuration of your handlers is correct, please see the :ref:`section ` below. Methods of capture ------------------ There are three different techniques for capturing messages logged to the Python logging framework, depending on the type of test you are writing. They are all described in the sections below. The context manager ~~~~~~~~~~~~~~~~~~~ If you're using a version of Python where the ``with`` keyword is available, the context manager provided by testfixtures can be used: >>> import logging >>> from testfixtures import LogCapture >>> with LogCapture() as l: ... logger = logging.getLogger() ... logger.info('a message') ... logger.error('an error') For the duration of the ``with`` block, log messages are captured. The context manager provides a check method that raises an exception if the logging wasn't as you expected: >>> l.check( ... ('root', 'INFO', 'a message'), ... ('root', 'ERROR', 'another error'), ... ) Traceback (most recent call last): ... AssertionError: sequence not as expected: same: (('root', 'INFO', 'a message'),) expected: (('root', 'ERROR', 'another error'),) actual: (('root', 'ERROR', 'an error'),) It also has a string representation that allows you to see what has been logged, which is useful for doc tests: >>> print(l) root INFO a message root ERROR an error The decorator ~~~~~~~~~~~~~ If you are working in a traditional :mod:`unittest` environment and only want to capture logging for a particular test function, you may find the decorator suits your needs better: .. 
code-block:: python from testfixtures import log_capture @log_capture() def test_function(capture): logger = logging.getLogger() logger.info('a message') logger.error('an error') capture.check( ('root', 'INFO', 'a message'), ('root', 'ERROR', 'an error'), ) .. check the above raises no assertion error: >>> test_function() .. note:: This method is not compatible with pytest's fixture discovery stuff. Instead, put a fixture such as the following in your `conftest.py`: .. code-block:: python import pytest @pytest.fixture(autouse=True) def capture(): with LogCapture() as capture: yield capture Manual usage ~~~~~~~~~~~~ If you want to capture logging for the duration of a doctest or in every test in a :class:`~unittest.TestCase`, then you can use the :class:`~testfixtures.LogCapture` manually. The instantiation and replacement are done in the ``setUp`` function of the :class:`~unittest.TestCase` or passed to the :class:`~doctest.DocTestSuite` constructor: >>> from testfixtures import LogCapture >>> l = LogCapture() You can then execute whatever will log the messages you want to test for: >>> from logging import getLogger >>> getLogger().info('a message') At any point, you can check what has been logged using the check method: >>> l.check(('root', 'INFO', 'a message')) Alternatively, you can use the string representation of the :class:`~testfixtures.LogCapture`: >>> print(l) root INFO a message Then, in the ``tearDown`` function of the :class:`~unittest.TestCase` or passed to the :class:`~doctest.DocTestSuite` constructor, you should make sure you stop the capturing: >>> l.uninstall() If you have multiple :class:`~testfixtures.LogCapture` objects in use, you can easily uninstall them all: >>> LogCapture.uninstall_all() Checking captured log messages ------------------------------ Regardless of how you use the :class:`~testfixtures.LogCapture` to capture messages, there are three ways of checking that the messages captured were as expected. 
The following example is useful for showing these: .. code-block:: python from testfixtures import LogCapture from logging import getLogger logger = getLogger() with LogCapture() as log: logger.info('start of block number %i', 1) try: logger.debug('inside try block') raise RuntimeError('No code to run!') except: logger.error('error occurred', exc_info=True) The check methods ~~~~~~~~~~~~~~~~~ :obj:`~testfixtures.LogCapture` instances have :meth:`~testfixtures.LogCapture.check` and :meth:`~testfixtures.LogCapture.check_present` methods to make assertions about entries that have been logged. :meth:`~testfixtures.LogCapture.check` will compare the log messages captured with those you expect. Expected messages are expressed, by default, as three-element tuples where the first element is the name of the logger to which the message should have been logged, the second element is the string representation of the level at which the message should have been logged and the third element is the message that should have been logged after any parameter interpolation has taken place. If things are as you expected, the method will not raise any exceptions: >>> log.check( ... ('root', 'INFO', 'start of block number 1'), ... ('root', 'DEBUG', 'inside try block'), ... ('root', 'ERROR', 'error occurred'), ... ) However, if the actual messages logged were different, you'll get an :class:`~exceptions.AssertionError` explaining what happened: >>> log.check(('root', 'INFO', 'start of block number 1')) Traceback (most recent call last): ... AssertionError: sequence not as expected: same: (('root', 'INFO', 'start of block number 1'),) expected: () actual: (('root', 'DEBUG', 'inside try block'), ('root', 'ERROR', 'error occurred')) In contrast, :meth:`~testfixtures.LogCapture.check_present` will only check that the messages you specify are present, and that their order is as specified. Other messages will be ignored: >>> log.check_present( ... ('root', 'INFO', 'start of block number 1'), ... 
('root', 'ERROR', 'error occurred'), ... ) If the order of messages is non-deterministic, then you can be explict that the order doesn't matter: >>> log.check_present( ... ('root', 'ERROR', 'error occurred'), ... ('root', 'INFO', 'start of block number 1'), ... order_matters=False ... ) Printing ~~~~~~~~ The :obj:`~testfixtures.LogCapture` has a string representation that shows what messages it has captured. This can be useful in doc tests: >>> print(log) root INFO start of block number 1 root DEBUG inside try block root ERROR error occurred This representation can also be used to check that no logging has occurred: >>> empty = LogCapture() >>> print(empty) No logging captured Inspecting ~~~~~~~~~~ The :obj:`~testfixtures.LogCapture` also keeps a list of the :class:`~logging.LogRecord` instances it captures. This is useful when you want to check specifics of the captured logging that aren't available from either the string representation or the :meth:`~testfixtures.LogCapture.check` method. A common case of this is where you want to check that exception information was logged for certain messages: .. code-block:: python from testfixtures import compare, Comparison as C compare(C(RuntimeError('No code to run!')), log.records[-1].exc_info[1]) If you wish the extraction specified in the ``attributes`` parameter to the :obj:`~testfixtures.LogCapture` constructor to be taken into account, you can examine the list of recorded entries returned by the :meth:`~testfixtures.LogCapture.actual` method: .. code-block:: python assert log.actual()[-1][-1] == 'error occurred' Only capturing specific logging ------------------------------- Some actions that you want to test may generate a lot of logging, only some of which you actually need to care about. The logging you care about is often only that above a certain log level. 
If this is the case, you can configure :obj:`~testfixtures.LogCapture` to only capture logging at or above a specific level: >>> with LogCapture(level=logging.INFO) as l: ... logger = getLogger() ... logger.debug('junk') ... logger.info('something we care about') ... logger.error('an error') >>> print(l) root INFO something we care about root ERROR an error In other cases this problem can be alleviated by only capturing a specific logger: >>> with LogCapture('specific') as l: ... getLogger('something').info('junk') ... getLogger('specific').info('what we care about') ... getLogger().info('more junk') >>> print(l) specific INFO what we care about However, it may be that while you don't want to capture all logging, you do want to capture logging from multiple specific loggers: >>> with LogCapture(('one','two')) as l: ... getLogger('three').info('3') ... getLogger('two').info('2') ... getLogger('one').info('1') >>> print(l) two INFO 2 one INFO 1 It may also be that the simplest thing to do is only capture logging for part of your test. This is particularly common with long doc tests. To make this easier, :class:`~testfixtures.LogCapture` supports manual installation and un-installation as shown in the following example: >>> l = LogCapture(install=False) >>> getLogger().info('junk') >>> l.install() >>> getLogger().info('something we care about') >>> l.uninstall() >>> getLogger().info('more junk') >>> l.install() >>> getLogger().info('something else we care about') >>> print(l) root INFO something we care about root INFO something else we care about .. uninstall: >>> LogCapture.uninstall_all() Once you have the filtered to the entries you would like to make assertions about, you may also want to look at a different set of attributes that the defaults for :class:`~testfixtures.LogCapture`: >>> with LogCapture(attributes=('levelname', 'getMessage')) as log: ... logger = getLogger() ... logger.debug('a debug message') ... logger.info('something %s', 'info') ... 
logger.error('an error') >>> log.check(('DEBUG', 'a debug message'), ('INFO', 'something info'), ('ERROR', 'an error')) As you can see, if a specified attribute is callable, it will be called and the result used to form part of the entry. If you need even more control, you can pass a callable to the ``attributes`` parameter, which can extract any required information from the records and return it in the most appropriate form: .. code-block:: python def extract(record): return {'level': record.levelname, 'message': record.getMessage()} >>> with LogCapture(attributes=extract) as log: ... logger = getLogger() ... logger.debug('a debug message') ... logger.error('an error') >>> log.check( ... {'level': 'DEBUG', 'message': 'a debug message'}, ... {'level': 'ERROR', 'message': 'an error'}, ... ) .. _check-log-config: Checking the configuration of your log handlers ----------------------------------------------- :class:`LogCapture` is good for checking that your code is logging the correct messages; just as important is checking that your application has correctly configured log handers. This can be done using a unit test such as the following: .. code-block:: python from testfixtures import Comparison as C, compare from unittest import TestCase import logging import sys class LoggingConfigurationTests(TestCase): # We mock out the handlers list for the logger we're # configuring in such a way that we have no handlers # configured at the start of the test and the handlers our # configuration installs are removed at the end of the test. 
def setUp(self): self.logger = logging.getLogger() self.orig_handlers = self.logger.handlers self.logger.handlers = [] self.level = self.logger.level def tearDown(self): self.logger.handlers = self.orig_handlers self.logger.level = self.level def test_basic_configuration(self): # Our logging configuration code, in this case just a # call to basicConfig: logging.basicConfig(format='%(levelname)s %(message)s', level=logging.INFO) # Now we check the configuration is as expected: compare(self.logger.level, 20) compare([ C('logging.StreamHandler', stream=sys.stderr, formatter=C('logging.Formatter', _fmt='%(levelname)s %(message)s', partial=True), level=logging.NOTSET, partial=True) ], self.logger.handlers) .. the result: >>> import unittest >>> from testfixtures.compat import StringIO >>> suite = unittest.TestLoader().loadTestsFromTestCase(LoggingConfigurationTests) >>> stream = StringIO() >>> result = unittest.TextTestRunner(verbosity=0, stream=stream).run(suite) >>> if result.errors or result.failures: print(stream.getvalue()) >>> result testfixtures-6.18.3/docs/make.bat000066400000000000000000000047621412502526400167310ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation set SPHINXBUILD=..\bin\sphinx-build set ALLSPHINXOPTS=-d _build/doctrees %SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. 
doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (_build\*) do rmdir /q /s %%i del /q /s _build\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% _build/html echo. echo.Build finished. The HTML pages are in _build/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% _build/dirhtml echo. echo.Build finished. The HTML pages are in _build/dirhtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% _build/pickle echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% _build/json echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% _build/htmlhelp echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in _build/htmlhelp. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% _build/latex echo. echo.Build finished; the LaTeX files are in _build/latex. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% _build/changes echo. echo.The overview file is in _build/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% _build/linkcheck echo. echo.Link check complete; look for any errors in the above output ^ or in _build/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% _build/doctest echo. echo.Testing of doctests in the sources finished, look at the ^ results in _build/doctest/output.txt. goto end ) :end testfixtures-6.18.3/docs/mocking.txt000066400000000000000000000314331412502526400175070ustar00rootroot00000000000000Mocking out objects and methods =============================== .. 
py:currentmodule:: testfixtures Mocking is the process of replacing objects used in your code with ones that make testing easier, but only while the tests are running. This may mean replacing resources or dependencies, such as database connections or file paths, with ones that are isolated for testing. It may also mean replacing chunks of complex functionality that aren't the subject of the test with mock objects that allow you to check that the mocked out functionality is being used as expected. What to mock with ----------------- Python has a standard mock implementation in the form of :mod:`unittest.mock` which is also available as a `rolling backport`__ so that the latest features and bugfixes can be used in any version of Python. __ https://mock.readthedocs.io For convenience, testfixtures provides a facade over both of these in the form of :mod:`testfixtures.mock`. The contents are identical and preference is given to the rolling backport if it is present. The facade also contains any bugfixes that are critical to the operation of functionality provided by testfixtures. Testfixtures also provides specialised mocks for dealing with :doc:`dates and times ` and :doc:`subprocesses `. How to mock ----------- Testfixtures provides :class:`Replace`, :class:`Replacer` and the :func:`replace` decorator to mock out objects. These work in a similar way to :func:`unittest.mock.patch`, and have been around longer. They still provide a little more flexibility than :func:`~unittest.mock.patch`, so you whichever feels best in your codebase. Methods of replacement ---------------------- There are three different methods of mocking out functionality that can be used to replace functions, classes or even individual methods on a class. Consider the following module: .. topic:: testfixtures.tests.sample1 :class: module .. literalinclude:: ../testfixtures/tests/sample1.py :pyobject: X .. 
do the import quietly >>> from testfixtures.tests.sample1 import X We want to mock out the ``y`` method of the ``X`` class, with, for example, the following function: .. code-block:: python def mock_y(self): return 'mock y' The context manager ~~~~~~~~~~~~~~~~~~~ For replacement of a single thing, it's easiest to use the :class:`~testfixtures.Replace` context manager: .. code-block:: python from testfixtures import Replace def test_function(): with Replace('testfixtures.tests.sample1.X.y', mock_y): print(X().y()) For the duration of the ``with`` block, the replacement is used: >>> test_function() mock y For multiple replacements to do, or where the you need access to the replacement within the code block under test, the :class:`~testfixtures.Replacer` context manager can be used instead: .. code-block:: python from testfixtures.mock import Mock from testfixtures import Replacer def test_function(): with Replacer() as replace: mock_y = replace('testfixtures.tests.sample1.X.y', Mock()) mock_y.return_value = 'mock y' print(X().y()) For the duration of the ``with`` block, the replacement is used: >>> test_function() mock y The decorator ~~~~~~~~~~~~~ If you are working in a traditional :mod:`unittest` environment and want to replace different things in different test functions, you may find the decorator suits your needs better: .. code-block:: python from testfixtures import replace @replace('testfixtures.tests.sample1.X.y', mock_y) def test_function(): print(X().y()) When using the decorator, the replacement is used for the duration of the decorated callable's execution: >>> test_function() mock y If you need to manipulate or inspect the object that's used as a replacement, you can add an extra parameter to your function. The decorator will see this and pass the replacement in it's place: .. 
code-block:: python from testfixtures.mock import Mock, call from testfixtures import compare, replace @replace('testfixtures.tests.sample1.X.y', Mock()) def test_function(mocked_y): mocked_y.return_value = 'mock y' print(X().y()) compare(mocked_y.mock_calls, expected=[call()]) The above still results in the same output: >>> test_function() mock y .. note:: This method is not compatible with pytest's fixture discovery stuff. Instead, put a fixture such as the following in your `conftest.py`: .. code-block:: python from testfixtures import Replace import pytest @pytest.fixture() def mocked_y(): m = Mock() with Replace('testfixtures.tests.sample1.X.y', m): yield m Manual usage ~~~~~~~~~~~~ If you want to replace something for the duration of a doctest or you want to replace something for every test in a :class:`~unittest.TestCase`, then you can use the :class:`~testfixtures.Replacer` manually. The instantiation and replacement are done in the ``setUp`` function of the :class:`~unittest.TestCase` or passed to the :class:`~doctest.DocTestSuite` constructor: >>> from testfixtures import Replacer >>> replace = Replacer() >>> replace('testfixtures.tests.sample1.X.y', mock_y) <...> The replacement then stays in place until removed: >>> X().y() 'mock y' Then, in the ``tearDown`` function of the :class:`~unittest.TestCase` or passed to the :class:`~doctest.DocTestSuite` constructor, the replacement is removed: >>> replace.restore() >>> X().y() 'original y' The :meth:`~testfixtures.Replacer.restore` method can also be added as an :meth:`~unittest.TestCase.addCleanup` if that is easier or more compact in your test suite. Replacing more than one thing ----------------------------- Both the :class:`~testfixtures.Replacer` and the :func:`~testfixtures.replace` decorator can be used to replace more than one thing at a time. For the former, this is fairly obvious: .. 
code-block:: python def test_function(): with Replacer() as replace: y = replace('testfixtures.tests.sample1.X.y', Mock()) y.return_value = 'mock y' aMethod = replace('testfixtures.tests.sample1.X.aMethod', Mock()) aMethod.return_value = 'mock method' x = X() print(x.y(), x.aMethod()) .. the result: >>> test_function() mock y mock method For the decorator, it's less obvious but still pretty easy: .. code-block:: python from testfixtures import replace @replace('testfixtures.tests.sample1.X.y', Mock()) @replace('testfixtures.tests.sample1.X.aMethod', Mock()) def test_function(aMethod, y): print(aMethod, y) aMethod().return_value = 'mock method' y().return_value = 'mock y' x = X() print(aMethod, y) print(x.y(), x.aMethod()) You'll notice that you can still get access to the replacements, even though there are several of them. Replacing things that may not be there -------------------------------------- The following code shows a situation where ``hpy`` may or may not be present depending on whether the ``guppy`` package is installed or not. .. topic:: testfixtures.tests.sample2 :class: module .. literalinclude:: ../testfixtures/tests/sample2.py :lines: 10-19 To test the behaviour of the code that uses ``hpy`` in both of these cases, regardless of whether or not the ``guppy`` package is actually installed, we need to be able to mock out both ``hpy`` and the ``guppy`` global. This is done by doing non-strict replacement, as shown in the following :class:`~unittest.TestCase`: .. imports >>> import unittest,sys .. 
code-block:: python from testfixtures.tests.sample2 import dump from testfixtures import replace from testfixtures.mock import Mock, call class Tests(unittest.TestCase): @replace('testfixtures.tests.sample2.guppy', True) @replace('testfixtures.tests.sample2.hpy', Mock(), strict=False) def test_method(self, hpy): dump('somepath') compare([ call(), call().heap(), call().heap().stat.dump('somepath') ], hpy.mock_calls) @replace('testfixtures.tests.sample2.guppy', False) @replace('testfixtures.tests.sample2.hpy', Mock(), strict=False) def test_method_no_heapy(self,hpy): dump('somepath') compare(hpy.mock_calls,[]) .. the result: >>> from testfixtures.compat import StringIO >>> suite = unittest.TestLoader().loadTestsFromTestCase(Tests) >>> unittest.TextTestRunner(verbosity=0,stream=StringIO()).run(suite) The :meth:`~testfixtures.Replacer.replace` method and calling a :class:`Replacer` also supports non-strict replacement using the same keyword parameter. Replacing items in dictionaries and lists ----------------------------------------- :class:`~testfixtures.Replace`, :class:`~testfixtures.Replacer` and the :func:`~testfixtures.replace` decorator can be used to replace items in dictionaries and lists. For example, suppose you have a data structure like the following: .. topic:: testfixtures.tests.sample1 :class: module .. literalinclude:: ../testfixtures/tests/sample1.py :lines: 67-70 You can mock out the value associated with ``key`` and the second element in the ``complex_key`` list as follows: .. 
code-block:: python from pprint import pprint from testfixtures import Replacer from testfixtures.tests.sample1 import someDict def test_function(): with Replacer() as replace: replace('testfixtures.tests.sample1.someDict.key', 'foo') replace('testfixtures.tests.sample1.someDict.complex_key.1', 42) pprint(someDict) While the replacement is in effect, the new items are in place: >>> test_function() {'complex_key': [1, 42, 3], 'key': 'foo'} When it is no longer in effect, the originals are returned: >>> pprint(someDict) {'complex_key': [1, 2, 3], 'key': 'value'} .. _removing_attr_and_item: Removing attributes and dictionary items ---------------------------------------- :class:`~testfixtures.Replace`, :class:`~testfixtures.Replacer` and the :func:`~testfixtures.replace` decorator can be used to remove attributes from objects and remove items from dictionaries. For example, suppose you have a data structure like the following: .. topic:: testfixtures.tests.sample1 :class: module .. literalinclude:: ../testfixtures/tests/sample1.py :lines: 67-70 If you want to remove the ``key`` for the duration of a test, you can do so as follows: .. code-block:: python from testfixtures import Replace, not_there from testfixtures.tests.sample1 import someDict def test_function(): with Replace('testfixtures.tests.sample1.someDict.key', not_there): pprint(someDict) While the replacement is in effect, ``key`` is gone: >>> test_function() {'complex_key': [1, 2, 3]} When it is no longer in effect, ``key`` is returned: >>> pprint(someDict) {'complex_key': [1, 2, 3], 'key': 'value'} If you want the whole ``someDict`` dictionary to be removed for the duration of a test, you would do so as follows: .. 
code-block:: python from testfixtures import Replace, not_there from testfixtures.tests import sample1 def test_function(): with Replace('testfixtures.tests.sample1.someDict', not_there): print(hasattr(sample1, 'someDict')) While the replacement is in effect, ``key`` is gone: >>> test_function() False When it is no longer in effect, ``key`` is returned: >>> pprint(sample1.someDict) {'complex_key': [1, 2, 3], 'key': 'value'} Gotchas ------- - Make sure you replace the object where it's used and not where it's defined. For example, with the following code from the ``testfixtures.tests.sample1`` package: .. literalinclude:: ../testfixtures/tests/sample1.py :lines: 30-34 You might be tempted to mock things as follows: >>> replace = Replacer() >>> replace('time.time', Mock()) <...> But this won't work: >>> from testfixtures.tests.sample1 import str_time >>> type(float(str_time())) <... 'float'> You need to replace :func:`~time.time` where it's used, not where it's defined: >>> replace('testfixtures.tests.sample1.time', Mock()) <...> >>> str_time() "<...Mock...>" .. cleanup >>> replace.restore() A corollary of this is that you need to replace *all* occurrences of an original to safely be able to test. This can be tricky when an original is imported into many modules that may be used by a particular test. - You can't replace whole top level modules, and nor should you want to! The reason being that everything up to the last dot in the replacement target specifies where the replacement will take place, and the part after the last dot is used as the name of the thing to be replaced: >>> Replacer().replace('sys', Mock()) Traceback (most recent call last): ... ValueError: target must contain at least one dot! testfixtures-6.18.3/docs/popen.txt000066400000000000000000000166221412502526400172040ustar00rootroot00000000000000 .. 
currentmodule:: testfixtures.popen Testing use of the subprocess package ===================================== When using the :mod:`subprocess` package there are two approaches to testing: * Have your tests exercise the real processes being instantiated and used. * Mock out use of the :mod:`subprocess` package and provide expected output while recording interactions with the package to make sure they are as expected. While the first of these should be preferred, it means that you need to have all the external software available everywhere you wish to run tests. Your tests will also need to make sure any dependencies of that software on an external environment are met. If that external software takes a long time to run, your tests will also take a long time to run. These challenges can often make the second approach more practical and can be the more pragmatic approach when coupled with a mock that accurately simulates the behaviour of a subprocess. :class:`~testfixtures.popen.MockPopen` is an attempt to provide just such a mock. .. note:: To use :class:`~testfixtures.popen.MockPopen`, you must have the :mod:`mock` package installed or be using Python 3.3 or later. .. warning:: Previous versions of this mock made use of :attr:`~unittest.mock.Mock.mock_calls`. These are deceptively incapable of recording some information important in the use of this mock, so please switch to making assertions about :attr:`~MockPopen.all_calls` and :attr:`~MockPopenInstance.calls` instead. Example usage ------------- As an example, suppose you have code such as the following that you need to test: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :lines: 4-12 Tests that exercise this code using :class:`~testfixtures.popen.MockPopen` could be written as follows: .. 
literalinclude:: ../testfixtures/tests/test_popen_docs.py :lines: 16-52 Passing input to processes -------------------------- If your testing requires passing input to the subprocess, you can do so by checking for the input passed to :meth:`~subprocess.Popen.communicate` method when you check the calls on the mock as shown in this example: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: TestMyFunc.test_communicate_with_input :dedent: 4 .. note:: Accessing ``.stdin`` isn't current supported by this mock. Reading from ``stdout`` and ``stderr`` -------------------------------------- The :attr:`~MockPopenInstance.stdout` and :attr:`~MockPopenInstance.stderr` attributes of the mock returned by :class:`~testfixtures.popen.MockPopen` will be file-like objects as with the real :class:`~subprocess.Popen` and can be read as shown in this example: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: TestMyFunc.test_read_from_stdout_and_stderr :dedent: 4 .. warning:: While these streams behave a lot like the streams of a real :class:`~subprocess.Popen` object, they do not exhibit the deadlocking behaviour that can occur when the two streams are read as in the example above. Be very careful when reading :attr:`~MockPopenInstance.stdout` and :attr:`~MockPopenInstance.stderr` and consider using :meth:`~subprocess.Popen.communicate` instead. Writing to ``stdin`` -------------------- If you set ``stdin=PIPE`` in your call to :class:`~subprocess.Popen` then the :attr:`~MockPopenInstance.stdin` attribute of the mock returned by :class:`~testfixtures.popen.MockPopen` will be a mock and you can then examine the write calls to it as shown in this example: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: TestMyFunc.test_write_to_stdin :dedent: 4 Specifying the return code -------------------------- Often code will need to behave differently depending on the return code of the launched process. 
Specifying a simulated response code, along with testing for the correct usage of :meth:`~subprocess.Popen.wait`, can be seen in the following example: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: TestMyFunc.test_wait_and_return_code :dedent: 4 Checking for signal sending --------------------------- Calls to :meth:`~MockPopenInstance.send_signal`, :meth:`MockPopenInstance.terminate` and :meth:`MockPopenInstance.kill` are all recorded by the mock returned by :class:`~testfixtures.popen.MockPopen` but otherwise do nothing as shown in the following example, which doesn't make sense for a real test of sub-process usage but does show how the mock behaves: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: TestMyFunc.test_send_signal :dedent: 4 Polling a process ----------------- The :meth:`~subprocess.Popen.poll` method is often used as part of a loop in order to do other work while waiting for a sub-process to complete. The mock returned by :class:`~testfixtures.popen.MockPopen` supports this by allowing the :meth:`~MockPopenInstance.poll` method to be called a number of times before the :attr:`~MockPopenInstance.returncode` is set using the ``poll_count`` parameter as shown in the following example: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: TestMyFunc.test_poll_until_result :dedent: 4 Different behaviour on sequential processes ------------------------------------------- If your code needs to call the same command but have different behaviour on each call, then you can pass a callable behaviour like this: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: TestMyFunc.test_multiple_responses :dedent: 4 If you need to keep state across calls, such as accumulating :attr:`~MockPopenInstance.stdin` or failing for a configurable number of calls, then wrap that behaviour up into a class: .. 
literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: CustomBehaviour This can then be used like this: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: TestMyFunc.test_count_down :dedent: 4 Using default behaviour ----------------------- If you're testing something that needs to make many calls to many different commands that all behave the same, it can be tedious to specify the behaviour of each with :class:`~MockPopen.set_command`. For this case, :class:`~MockPopen` has the :class:`~MockPopen.set_default` method which can be used to set the behaviour of any command that has not been specified with :class:`~MockPopen.set_command` as shown in the following example: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: TestMyFunc.test_default_behaviour :dedent: 4 Tracking multiple simultaneous processes ---------------------------------------- Conversely, if you're testing something that spins up multiple subprocesses and manages their simultaneous execution, you will want to explicitly define the behaviour of each process using :class:`~MockPopen.set_command` and then make assertions about each process using :attr:`~MockPopen.all_calls`. For example, suppose we wanted to test this function: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: process_in_batches Then you could test it as follows: .. literalinclude:: ../testfixtures/tests/test_popen_docs.py :pyobject: TestMyFunc.test_multiple_processes :dedent: 4 Note that the order of all calls is explicitly recorded. If the order of these calls is non-deterministic due to your method of process management, you will need to do more work and be very careful when testing. testfixtures-6.18.3/docs/streams.txt000066400000000000000000000052721412502526400175400ustar00rootroot00000000000000Testing output to streams ========================= .. 
currentmodule:: testfixtures In many situations, it's perfectly legitimate for output to be printed to one of the standard streams. To aid with testing this kind of output, testfixtures provides the :class:`OutputCapture` helper. This helper is a context manager that captures output sent to ``sys.stdout`` and ``sys.stderr`` and provides a :meth:`~OutputCapture.compare` method to check that the output was as expected. Here's a simple example: .. code-block:: python from testfixtures import OutputCapture import sys with OutputCapture() as output: # code under test print("Hello!") print("Something bad happened!", file=sys.stderr) output.compare('\n'.join([ "Hello!", "Something bad happened!", ])) To make life easier, both the actual and expected output are stripped of leading and trailing whitespace before the comparison is done: >>> with OutputCapture() as o: ... print(' Bar! ') ... o.compare(' Foo! ') Traceback (most recent call last): ... AssertionError: 'Foo!' (expected) != 'Bar!' (actual) However, if you need to make very explicit assertions about what has been written to the stream then you can do so using the `captured` property of the :class:`OutputCapture`: >>> with OutputCapture() as o: ... print(' Bar! ') >>> print(repr(o.captured)) ' Bar! \n' If you need to explicitly check whether output went to ``stdout`` or ``stderr``, `separate` mode can be used: .. code-block:: python from testfixtures import OutputCapture import sys with OutputCapture(separate=True) as output: print("Hello!") print("Something bad happened!", file=sys.stderr) output.compare( stdout="Hello!", stderr="Something bad happened!", ) Finally, you may sometimes want to disable an :class:`OutputCapture` without removing it from your code. This often happens when you want to insert a debugger call while an :class:`OutputCapture` is active; if it remains enabled, all debugger output will be captured making the debugger very difficult to use! 
To deal with this problem, the :class:`OutputCapture` may be disabled and then re-enabled as follows: >>> with OutputCapture() as o: ... print('Foo') ... o.disable() ... print('Bar') ... o.enable() ... print('Baz') Bar >>> print(o.captured) Foo Baz .. note:: Some debuggers, notably :mod:`pdb`, do interesting things with streams such that calling :meth:`~OutputCapture.disable` from within the debugger will have no effect. A good fallback is to type the following, which will almost always restore output to where you want it: .. code-block:: python import sys; sys.stdout=sys.__stdout__ testfixtures-6.18.3/docs/utilities.txt000066400000000000000000000065011412502526400200710ustar00rootroot00000000000000Utilities ========= .. currentmodule:: testfixtures This section describes a few handy functions that didn't fit nicely in any other section. .. _generator: The ``generator`` helper ------------------------ It can be handy when testing to be able to turn a simple sequence into a generator. This can be necessary when you want to check that your code will behave correctly when processing a generator instead of a simple sequence, or when you're looking to make assertions about the expected return value of a callable that returns a generator. If you need to turn a simple sequence into a generator, the :func:`generator` function is the way to do it: >>> from testfixtures import generator >>> generator(1,2,3) .. invisible-code-block: python from __future__ import print_function Iterating over this generator will return the arguments passed to the :func:`generator` function: >>> for i in _: ... print(i, end=' ') 1 2 3 The ``wrap`` helper ------------------- The :func:`wrap` helper is a decorator function that allows you to wrap the call to the decorated callable with calls to other callables. This can be useful when you want to perform setup and teardown actions either side of a test function. For example, take the following functions: .. 
code-block:: python def before(): print("before") def after(): print("after") The :func:`wrap` helper can be used to wrap a function with these: .. code-block:: python from testfixtures import wrap @wrap(before,after) def a_function(): print("a_function") When the wrapped function is executed, the output is as follows: >>> a_function() before a_function after The section argument to :func:`wrap` is optional: .. code-block:: python from testfixtures import wrap @wrap(before) def a_function(): print("a_function") Now, the wrapped function gives the following output when executed: >>> a_function() before a_function Multiple wrapping functions can be provided by stacking :func:`wrap` decorations: .. code-block:: python def before1(): print("before 1") def after1(): print("after 1") def before2(): print("before 2") def after2(): print("after 2") @wrap(before2,after2) @wrap(before1,after1) def a_function(): print("a_function") The order of execution is illustrated below: >>> a_function() before 1 before 2 a_function after 2 after 1 The results of calling the wrapping functions executed before the wrapped function can be made available to the wrapped function provided it accepts positional arguments for these results: .. code-block:: python def before1(): return "return 1" def before2(): return "return 2" @wrap(before2) @wrap(before1) def a_function(r1,r2): print(r1) print(r2) Calling the wrapped function illustrates the behaviour: >>> a_function() return 1 return 2 Finally, the return value of the wrapped function will always be that of the original function: .. 
code-block:: python def before1(): return 1 def after1(): return 2 def before2(): return 3 def after2(): return 4 @wrap(before2,after2) @wrap(before1,after2) def a_function(): return 'original' When the above wrapped function is executed, the original return value is still returned: >>> a_function() 'original' testfixtures-6.18.3/docs/warnings.txt000066400000000000000000000051341412502526400177070ustar00rootroot00000000000000Testing warnings ================ .. currentmodule:: testfixtures The :mod:`unittest` support for asserting that warnings are issued when expected is fairly convoluted, so testfixtures has tools to help with this. The :class:`ShouldWarn` context manager --------------------------------------- This context manager allows you to assert that particular warnings are recorded in a block of code, for example: >>> from warnings import warn >>> from testfixtures import ShouldWarn >>> with ShouldWarn(UserWarning('you should fix that')): ... warn('you should fix that') If a warning issued doesn't match the one expected, :class:`ShouldWarn` will raise an :class:`AssertionError` causing the test in which it occurs to fail: >>> from warnings import warn >>> from testfixtures import ShouldWarn >>> with ShouldWarn(UserWarning('you should fix that')): ... warn("sorry dave, I can't let you do that") Traceback (most recent call last): ... AssertionError: sequence not as expected: same: [] expected: [ attributes differ: 'args': ('you should fix that',) (Comparison) != ("sorry dave, I can't let you do that",) (actual) ] actual: [UserWarning("sorry dave, I can't let you do that"...)] You can check multiple warnings in a particular piece of code: >>> from warnings import warn >>> from testfixtures import ShouldWarn >>> with ShouldWarn(UserWarning('you should fix that'), ... UserWarning('and that too')): ... warn('you should fix that') ... 
warn('and that too') If you want to inspect more details of the warnings issued, you can capture them into a list as follows: >>> from warnings import warn_explicit >>> from testfixtures import ShouldWarn >>> with ShouldWarn() as captured: ... warn_explicit(message='foo', category=DeprecationWarning, ... filename='bar.py', lineno=42) >>> len(captured) 1 >>> captured[0].message DeprecationWarning('foo'...) >>> captured[0].lineno 42 The :class:`ShouldNotWarn` context manager ------------------------------------------ If you do not expect any warnings to be logged in a piece of code, you can use the :class:`ShouldNotWarn` context manager. If any warnings are issued in the context it manages, it will raise an :class:`AssertionError` to indicate this: >>> from warnings import warn >>> from testfixtures import ShouldNotWarn >>> with ShouldNotWarn(): ... warn("woah dude") Traceback (most recent call last): ... AssertionError: sequence not as expected: same: [] expected: [] actual: [UserWarning('woah dude'...)] testfixtures-6.18.3/setup.cfg000066400000000000000000000003451412502526400162060ustar00rootroot00000000000000[wheel] universal=1 [tool:pytest] addopts = -p no:doctest norecursedirs=_build DJANGO_SETTINGS_MODULE=testfixtures.tests.test_django.settings filterwarnings = ignore::DeprecationWarning ignore::PendingDeprecationWarning testfixtures-6.18.3/setup.py000066400000000000000000000031501412502526400160740ustar00rootroot00000000000000# Copyright (c) 2008-2014 Simplistix Ltd, 2015-2020 Chris Withers # See license.txt for license details. 
import os from setuptools import setup, find_packages name = 'testfixtures' base_dir = os.path.dirname(__file__) optional = [ 'mock;python_version<"3"', 'zope.component', 'django<2;python_version<"3"', 'django;python_version>="3"', 'sybil', 'twisted' ] setup( name=name, version=open(os.path.join(base_dir, name, 'version.txt')).read().strip(), author='Chris Withers', author_email='chris@simplistix.co.uk', license='MIT', description=("A collection of helpers and mock objects " "for unit tests and doc tests."), long_description=open(os.path.join(base_dir, 'README.rst')).read(), url='https://github.com/Simplistix/testfixtures', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], packages=find_packages(), zip_safe=False, include_package_data=True, extras_require=dict( test=['pytest>=3.6', 'pytest-cov', 'pytest-django', ]+optional, docs=['sphinx']+optional, build=['setuptools-git', 'wheel', 'twine'] ) ) testfixtures-6.18.3/testfixtures/000077500000000000000000000000001412502526400171345ustar00rootroot00000000000000testfixtures-6.18.3/testfixtures/__init__.py000066400000000000000000000016301412502526400212450ustar00rootroot00000000000000class singleton(object): def __init__(self, name): self.name = name def __repr__(self): return '<%s>' % self.name __str__ = __repr__ not_there = singleton('not_there') from testfixtures.comparison import ( Comparison, StringComparison, RoundComparison, compare, diff, RangeComparison, SequenceComparison, Subset, Permutation, MappingComparison ) from testfixtures.tdatetime import test_datetime, test_date, test_time from testfixtures.logcapture import LogCapture, log_capture 
from testfixtures.outputcapture import OutputCapture from testfixtures.resolve import resolve from testfixtures.replace import Replacer, Replace, replace from testfixtures.shouldraise import ShouldRaise, should_raise, ShouldAssert from testfixtures.shouldwarn import ShouldWarn, ShouldNotWarn from testfixtures.tempdirectory import TempDirectory, tempdir from testfixtures.utils import wrap, generator testfixtures-6.18.3/testfixtures/comparison.py000066400000000000000000001123161412502526400216640ustar00rootroot00000000000000from collections import OrderedDict from decimal import Decimal from difflib import unified_diff from functools import partial as partial_type, partial, reduce from operator import __or__ from pprint import pformat from types import GeneratorType import re from testfixtures import not_there from testfixtures.compat import ClassType, Iterable, Unicode, basestring, PY3, PY2 from testfixtures.resolve import resolve from testfixtures.utils import indent from testfixtures.mock import parent_name, mock_call, unittest_mock_call def diff(x, y, x_label='', y_label=''): """ A shorthand function that uses :mod:`difflib` to return a string representing the differences between the two string arguments. Most useful when comparing multi-line strings. """ return '\n'.join( unified_diff( x.split('\n'), y.split('\n'), x_label or 'first', y_label or 'second', lineterm='') ) def compare_simple(x, y, context): """ Returns a very simple textual difference between the two supplied objects. """ if x != y: repr_x = repr(x) repr_y = repr(y) if repr_x == repr_y: if type(x) is not type(y): return compare_with_type(x, y, context) x_attrs = _extract_attrs(x) y_attrs = _extract_attrs(y) diff_ = None if not (x_attrs is None or y_attrs is None): diff_ = _compare_mapping(x_attrs, y_attrs, context, x, 'attributes ', '.%s') if diff_: return diff_ return 'Both %s and %s appear as %r, but are not equal!' 
% ( context.x_label or 'x', context.y_label or 'y', repr_x ) return context.label('x', repr_x) + ' != ' + context.label('y', repr_y) def _extract_attrs(obj, ignore=None): try: attrs = vars(obj).copy() except TypeError: attrs = None else: if isinstance(obj, BaseException): attrs['args'] = obj.args has_slots = getattr(obj, '__slots__', not_there) is not not_there if has_slots: slots = set() for cls in type(obj).__mro__: slots.update(getattr(cls, '__slots__', ())) if slots and attrs is None: attrs = {} for n in slots: value = getattr(obj, n, not_there) if value is not not_there: attrs[n] = value if attrs is None: return None if ignore is not None: for attr in ignore: attrs.pop(attr, None) return attrs def _attrs_to_ignore(context, ignore_attributes, obj): ignore = context.get_option('ignore_attributes', ()) if isinstance(ignore, dict): ignore = ignore.get(type(obj), ()) ignore = set(ignore) ignore.update(ignore_attributes) return ignore def compare_object(x, y, context, ignore_attributes=()): """ Compare the two supplied objects based on their type and attributes. :param ignore_attributes: Either a sequence of strings containing attribute names to be ignored when comparing or a mapping of type to sequence of strings containing attribute names to be ignored when comparing that type. This may be specified as either a parameter to this function or in the ``context``. If specified in both, they will both apply with precedence given to whatever is specified is specified as a parameter. If specified as a parameter to this fucntion, it may only be a list of strings. 
""" if type(x) is not type(y) or isinstance(x, (ClassType, type)): return compare_simple(x, y, context) x_attrs = _extract_attrs(x, _attrs_to_ignore(context, ignore_attributes, x)) y_attrs = _extract_attrs(y, _attrs_to_ignore(context, ignore_attributes, y)) if x_attrs is None or y_attrs is None or not (x_attrs and y_attrs): return compare_simple(x, y, context) if context.ignore_eq or x_attrs != y_attrs: return _compare_mapping(x_attrs, y_attrs, context, x, 'attributes ', '.%s') def compare_exception(x, y, context): """ Compare the two supplied exceptions based on their message, type and attributes. """ if x.args != y.args: return compare_simple(x, y, context) return compare_object(x, y, context) def compare_with_type(x, y, context): """ Return a textual description of the difference between two objects including information about their types. """ source = locals() to_render = {} for name in 'x', 'y': obj = source[name] to_render[name] = context.label( name, '{0} ({1!r})'.format(_short_repr(obj), type(obj)) ) return '{x} != {y}'.format(**to_render) def compare_sequence(x, y, context, prefix=True): """ Returns a textual description of the differences between the two supplied sequences. """ l_x = len(x) l_y = len(y) i = 0 while i < l_x and i < l_y: if context.different(x[i], y[i], '[%i]' % i): break i += 1 if l_x == l_y and i == l_x: return return (('sequence not as expected:\n\n' if prefix else '')+ 'same:\n%s\n\n' '%s:\n%s\n\n' '%s:\n%s') % (pformat(x[:i]), context.x_label or 'first', pformat(x[i:]), context.y_label or 'second', pformat(y[i:]), ) def compare_generator(x, y, context): """ Returns a textual description of the differences between the two supplied generators. This is done by first unwinding each of the generators supplied into tuples and then passing those tuples to :func:`compare_sequence`. 
""" x = tuple(x) y = tuple(y) if not context.ignore_eq and x == y: return return compare_sequence(x, y, context) def compare_tuple(x, y, context): """ Returns a textual difference between two tuples or :func:`collections.namedtuple` instances. The presence of a ``_fields`` attribute on a tuple is used to decide whether or not it is a :func:`~collections.namedtuple`. """ x_fields = getattr(x, '_fields', None) y_fields = getattr(y, '_fields', None) if x_fields and y_fields: if x_fields == y_fields: return _compare_mapping(dict(zip(x_fields, x)), dict(zip(y_fields, y)), context, x) else: return compare_with_type(x, y, context) return compare_sequence(x, y, context) def compare_dict(x, y, context): """ Returns a textual description of the differences between the two supplied dictionaries. """ return _compare_mapping(x, y, context, x) def sorted_by_repr(sequence): return sorted(sequence, key=lambda o: repr(o)) def _compare_mapping(x, y, context, obj_for_class, prefix='', breadcrumb='[%r]', check_y_not_x=True): x_keys = set(x.keys()) y_keys = set(y.keys()) x_not_y = x_keys - y_keys y_not_x = y_keys - x_keys same = [] diffs = [] for key in sorted_by_repr(x_keys.intersection(y_keys)): if context.different(x[key], y[key], breadcrumb % (key, )): diffs.append('%r: %s != %s' % ( key, context.label('x', pformat(x[key])), context.label('y', pformat(y[key])), )) else: same.append(key) if not (x_not_y or (check_y_not_x and y_not_x) or diffs): return if obj_for_class is not_there: lines = [] else: lines = ['%s not as expected:' % obj_for_class.__class__.__name__] if same: try: same = sorted(same) except TypeError: pass lines.extend(('', '%ssame:' % prefix, repr(same))) x_label = context.x_label or 'first' y_label = context.y_label or 'second' if x_not_y: lines.extend(('', '%sin %s but not %s:' % (prefix, x_label, y_label))) for key in sorted_by_repr(x_not_y): lines.append('%r: %s' % ( key, pformat(x[key]) )) if y_not_x: lines.extend(('', '%sin %s but not %s:' % (prefix, y_label, 
x_label))) for key in sorted_by_repr(y_not_x): lines.append('%r: %s' % ( key, pformat(y[key]) )) if diffs: lines.extend(('', '%sdiffer:' % (prefix or 'values '))) lines.extend(diffs) return '\n'.join(lines) def compare_set(x, y, context): """ Returns a textual description of the differences between the two supplied sets. """ x_not_y = x - y y_not_x = y - x if not (y_not_x or x_not_y): return lines = ['%s not as expected:' % x.__class__.__name__, ''] x_label = context.x_label or 'first' y_label = context.y_label or 'second' if x_not_y: lines.extend(( 'in %s but not %s:' % (x_label, y_label), pformat(sorted_by_repr(x_not_y)), '', )) if y_not_x: lines.extend(( 'in %s but not %s:' % (y_label, x_label), pformat(sorted_by_repr(y_not_x)), '', )) return '\n'.join(lines)+'\n' trailing_whitespace_re = re.compile(r'\s+$', re.MULTILINE) def strip_blank_lines(text): result = [] for line in text.split('\n'): if line and not line.isspace(): result.append(line) return '\n'.join(result) def split_repr(text): parts = text.split('\n') for i, part in enumerate(parts[:-1]): parts[i] = repr(part + '\n') parts[-1] = repr(parts[-1]) return '\n'.join(parts) def compare_text(x, y, context): """ Returns an informative string describing the differences between the two supplied strings. The way in which this comparison is performed can be controlled using the following parameters: :param blanklines: If `False`, then when comparing multi-line strings, any blank lines in either argument will be ignored. :param trailing_whitespace: If `False`, then when comparing multi-line strings, trailing whilespace on lines will be ignored. :param show_whitespace: If `True`, then whitespace characters in multi-line strings will be replaced with their representations. 
""" blanklines = context.get_option('blanklines', True) trailing_whitespace = context.get_option('trailing_whitespace', True) show_whitespace = context.get_option('show_whitespace', False) if not trailing_whitespace: x = trailing_whitespace_re.sub('', x) y = trailing_whitespace_re.sub('', y) if not blanklines: x = strip_blank_lines(x) y = strip_blank_lines(y) if x == y: return labelled_x = context.label('x', repr(x)) labelled_y = context.label('y', repr(y)) if len(x) > 10 or len(y) > 10: if '\n' in x or '\n' in y: if show_whitespace: x = split_repr(x) y = split_repr(y) message = '\n' + diff(x, y, context.x_label, context.y_label) else: message = '\n%s\n!=\n%s' % (labelled_x, labelled_y) else: message = labelled_x+' != '+labelled_y return message def compare_bytes(x, y, context): if x == y: return labelled_x = context.label('x', repr(x)) labelled_y = context.label('y', repr(y)) return '\n%s\n!=\n%s' % (labelled_x, labelled_y) def compare_call(x, y, context): if x == y: return def extract(call): try: name, args, kwargs = call except ValueError: name = None args, kwargs = call return name, args, kwargs x_name, x_args, x_kw = extract(x) y_name, y_args, y_kw = extract(y) if x_name == y_name and x_args == y_args and x_kw == y_kw: return compare_call(getattr(x, parent_name), getattr(y, parent_name), context) if repr(x) != repr(y): return compare_text(repr(x), repr(y), context) different = ( context.different(x_name, y_name, ' function name') or context.different(x_args, y_args, ' args') or context.different(x_kw, y_kw, ' kw') ) if not different: return return 'mock.call not as expected:' def compare_partial(x, y, context): x_attrs = dict(func=x.func, args=x.args, keywords=x.keywords) y_attrs = dict(func=y.func, args=y.args, keywords=y.keywords) if x_attrs != y_attrs: return _compare_mapping(x_attrs, y_attrs, context, x, 'attributes ', '.%s') def _short_repr(obj): repr_ = repr(obj) if len(repr_) > 30: repr_ = repr_[:30] + '...' 
return repr_ _registry = { dict: compare_dict, set: compare_set, list: compare_sequence, tuple: compare_tuple, str: compare_text, Unicode: compare_text, int: compare_simple, float: compare_simple, Decimal: compare_simple, GeneratorType: compare_generator, mock_call.__class__: compare_call, unittest_mock_call.__class__: compare_call, BaseException: compare_exception, partial_type: compare_partial, } if PY3: _registry[bytes] = compare_bytes def register(type, comparer): """ Register the supplied comparer for the specified type. This registration is global and will be in effect from the point this function is called until the end of the current process. """ _registry[type] = comparer def _mro(obj): class_ = getattr(obj, '__class__', None) if class_ is None: # must be an old-style class object in Python 2! return (obj, ) mro = getattr(class_, '__mro__', None) if mro is None: # instance of old-style class in Python 2! return (class_, ) return mro def _shared_mro(x, y): y_mro = set(_mro(y)) for class_ in _mro(x): if class_ in y_mro: yield class_ _unsafe_iterables = basestring, dict class CompareContext(object): x_label = y_label = None def __init__(self, options): self.registries = [] comparers = options.pop('comparers', None) if comparers: self.registries.append(comparers) self.registries.append(_registry) self.recursive = options.pop('recursive', True) self.strict = options.pop('strict', False) self.ignore_eq = options.pop('ignore_eq', False) if 'expected' in options or 'actual' in options: self.x_label = 'expected' self.y_label = 'actual' self.x_label = options.pop('x_label', self.x_label) self.y_label = options.pop('y_label', self.y_label) self.options = options self.message = '' self.breadcrumbs = [] self._seen = set() def extract_args(self, args): possible = [] expected = self.options.pop('expected', not_there) if expected is not not_there: possible.append(expected) possible.extend(args) actual = self.options.pop('actual', not_there) if actual is not not_there: 
possible.append(actual) x = self.options.pop('x', not_there) if x is not not_there: possible.append(x) y = self.options.pop('y', not_there) if y is not not_there: possible.append(y) if len(possible) != 2: message = 'Exactly two objects needed, you supplied:' if possible: message += ' {}'.format(possible) if self.options: message += ' {}'.format(self.options) raise TypeError(message) return possible def get_option(self, name, default=None): return self.options.get(name, default) def label(self, side, value): r = str(value) label = getattr(self, side+'_label') if label: r += ' ('+label+')' return r def _lookup(self, x, y): if self.strict and type(x) is not type(y): return compare_with_type for class_ in _shared_mro(x, y): for registry in self.registries: comparer = registry.get(class_) if comparer: return comparer # fallback for iterables if ((isinstance(x, Iterable) and isinstance(y, Iterable)) and not (isinstance(x, _unsafe_iterables) or isinstance(y, _unsafe_iterables))): return compare_generator # special handling for Comparisons: if isinstance(x, Comparison) or isinstance(y, Comparison): return compare_simple return compare_object def _separator(self): return '\n\nWhile comparing %s: ' % ''.join(self.breadcrumbs[1:]) def seen(self, x, y): # don't get confused by interning: singleton_types = basestring, int, float if isinstance(x, singleton_types) and isinstance(y, singleton_types): return False key = id(x), id(y) if key in self._seen: return True self._seen.add(key) def different(self, x, y, breadcrumb): if self.seen(x, y): # a self-referential hierarchy; so lets say this one is # equal and hope the first time we saw it covers things... 
return False recursed = bool(self.breadcrumbs) self.breadcrumbs.append(breadcrumb) existing_message = self.message self.message = '' current_message = '' try: if not (self.strict or self.ignore_eq) and x == y: return False comparer = self._lookup(x, y) result = comparer(x, y, self) specific_comparer = comparer is not compare_simple if self.strict: if x == y and not specific_comparer: return False if result: if specific_comparer and recursed: current_message = self._separator() if specific_comparer or not recursed: current_message += result if self.recursive: current_message += self.message return result finally: self.message = existing_message + current_message self.breadcrumbs.pop() def _resolve_lazy(source): return str(source() if callable(source) else source) def compare(*args, **kw): """ Compare two objects, raising an :class:`AssertionError` if they are not the same. The :class:`AssertionError` raised will attempt to provide descriptions of the differences found. The two objects to compare can be passed either positionally or using explicit keyword arguments named ``x`` and ``y``, or ``expected`` and ``actual``. Any other keyword parameters supplied will be passed to the functions that end up doing the comparison. See the :mod:`API documentation below ` for details of these. :param prefix: If provided, in the event of an :class:`AssertionError` being raised, the prefix supplied will be prepended to the message in the :class:`AssertionError`. This may be a callable, in which case it will only be resolved if needed. :param suffix: If provided, in the event of an :class:`AssertionError` being raised, the suffix supplied will be appended to the message in the :class:`AssertionError`. This may be a callable, in which case it will only be resolved if needed. 
:param x_label: If provided, in the event of an :class:`AssertionError` being raised, the object passed as the first positional argument, or ``x`` keyword argument, will be labelled with this string in the message in the :class:`AssertionError`. :param y_label: If provided, in the event of an :class:`AssertionError` being raised, the object passed as the second positional argument, or ``y`` keyword argument, will be labelled with this string in the message in the :class:`AssertionError`. :param raises: If ``False``, the message that would be raised in the :class:`AssertionError` will be returned instead of the exception being raised. :param recursive: If ``True``, when a difference is found in a nested data structure, attempt to highlight the location of the difference. :param strict: If ``True``, objects will only compare equal if they are of the same type as well as being equal. :param ignore_eq: If ``True``, object equality, which relies on ``__eq__`` being correctly implemented, will not be used. Instead, comparers will be looked up and used and, if no suitable comparer is found, objects will be considered equal if their hash is equal. :param comparers: If supplied, should be a dictionary mapping types to comparer functions for those types. These will be added to the comparer registry for the duration of this call. """ __tracebackhide__ = True prefix = kw.pop('prefix', None) suffix = kw.pop('suffix', None) raises = kw.pop('raises', True) context = CompareContext(kw) x, y = context.extract_args(args) if not context.different(x, y, not_there): return message = context.message if prefix: message = _resolve_lazy(prefix) + ': ' + message if suffix: message += '\n' + _resolve_lazy(suffix) if raises: raise AssertionError(message) return message class StatefulComparison(object): """ A base class for stateful comparison objects. 
""" failed = '' expected = None name_attrs = () def __eq__(self, other): return not(self != other) def name(self): name = type(self).__name__ if self.name_attrs: name += '(%s)' % ', '.join('%s=%r' % (n, getattr(self, n)) for n in self.name_attrs) return name def body(self): return pformat(self.expected)[1:-1] def __repr__(self): name = self.name() body = self.failed or self.body() prefix = '<%s%s>' % (name, self.failed and '(failed)' or '') if '\n' in body: return '\n'+prefix+'\n'+body.strip('\n')+'\n'+'' % name elif body: return prefix + body + '' return prefix class Comparison(StatefulComparison): """ These are used when you need to compare an object's type, a subset of its attributes or make equality checks with objects that do not natively support comparison. :param object_or_type: The object or class from which to create the :class:`Comparison`. :param attribute_dict: An optional dictionary containing attributes to place on the :class:`Comparison`. :param partial: If true, only the specified attributes will be checked and any extra attributes of the object being compared with will be ignored. :param attributes: Any other keyword parameters passed will placed as attributes on the :class:`Comparison`. :param strict: .. deprecated:: 6.16.0 Use ``partial`` instead. 
""" def __init__(self, object_or_type, attribute_dict=None, partial=False, **attributes): self.partial = partial or not attributes.pop('strict', True) if attributes: if attribute_dict is None: attribute_dict = attributes else: attribute_dict.update(attributes) if isinstance(object_or_type, basestring): container, method, name, c = resolve(object_or_type) if c is not_there: raise AttributeError( '%r could not be resolved' % object_or_type ) elif isinstance(object_or_type, (ClassType, type)): c = object_or_type else: c = object_or_type.__class__ if attribute_dict is None: attribute_dict = _extract_attrs(object_or_type) self.expected_type = c self.expected_attributes = attribute_dict def __ne__(self, other): # .__class__ is important for Py2 compatibility. if self.expected_type is not other.__class__: self.failed = 'wrong type' return True if self.expected_attributes is None: return False attribute_names = set(self.expected_attributes.keys()) if self.partial: actual_attributes = {} else: actual_attributes = _extract_attrs(other) attribute_names -= set(actual_attributes) for name in attribute_names: try: actual_attributes[name] = getattr(other, name) except AttributeError: pass kw = {'x_label': 'Comparison', 'y_label': 'actual'} context = CompareContext(kw) self.failed = _compare_mapping(self.expected_attributes, actual_attributes, context, obj_for_class=not_there, prefix='attributes ', breadcrumb='.%s', check_y_not_x=not self.partial) return bool(self.failed) def name(self): name = 'C:' module = getattr(self.expected_type, '__module__', None) if module: name = name + module + '.' 
name += (getattr(self.expected_type, '__name__', None) or repr(self.expected_type)) return name def body(self): if self.expected_attributes: # if we're not failed, show what we will expect: lines = [] for k, v in sorted(self.expected_attributes.items()): rv = repr(v) if '\n' in rv: rv = indent(rv) lines.append('%s: %s' % (k, rv)) return '\n'.join(lines) return '' class SequenceComparison(StatefulComparison): """ An object that can be used in comparisons of expected and actual sequences. :param expected: The items expected to be in the sequence. :param ordered: If the items are expected to be in the order specified. Defaults to ``True``. :param partial: If any items not expected should be ignored. Defaults to ``False``. :param recursive: If a difference is found, recursively compare the item where the difference was found to highlight exactly what was different. Defaults to ``False``. """ name_attrs = ('ordered', 'partial') def __init__(self, *expected, **kw): self.expected = expected # py2 :-( self.ordered = kw.pop('ordered', True) self.partial = kw.pop('partial', False) self.recursive = kw.pop('recursive', False) assert not kw, 'unexpected parameter' self.checked_indices = set() def __ne__(self, other): try: actual = original_actual = list(other) except TypeError: self.failed = 'bad type' return True expected = list(self.expected) actual = list(actual) matched = [] matched_expected_indices = [] matched_actual_indices = [] missing_from_expected = actual missing_from_expected_indices = actual_indices = list(range(len(actual))) missing_from_actual = [] missing_from_actual_indices = [] start = 0 for e_i, e in enumerate(expected): try: i = actual.index(e, start) a_i = actual_indices.pop(i) except ValueError: missing_from_actual.append(e) missing_from_actual_indices.append(e_i) else: matched.append(missing_from_expected.pop(i)) matched_expected_indices.append(e_i) matched_actual_indices.append(a_i) self.checked_indices.add(a_i) if self.ordered: start = i 
matches_in_order = matched_actual_indices == sorted(matched_actual_indices) all_matched = not (missing_from_actual or missing_from_expected) partial_match = self.partial and not missing_from_actual if (matches_in_order or not self.ordered) and (all_matched or partial_match): return False expected_indices = matched_expected_indices+missing_from_actual_indices actual_indices = matched_actual_indices if self.partial: # try to give a clue as to what didn't match: if self.recursive and self.ordered and missing_from_expected: actual_indices.append(missing_from_expected_indices.pop(0)) missing_from_expected.pop(0) ignored = missing_from_expected missing_from_expected = None else: actual_indices += missing_from_expected_indices ignored = None message = [] def add_section(name, content): if content: message.append(name+':\n'+pformat(content)) add_section('ignored', ignored) if self.ordered: message.append(compare( expected=[self.expected[i] for i in sorted(expected_indices)], actual=[original_actual[i] for i in sorted(actual_indices)], recursive=self.recursive, raises=False ).split('\n\n', 1)[1]) else: add_section('same', matched) add_section('in expected but not actual', missing_from_actual) add_section('in actual but not expected', missing_from_expected) self.failed = '\n\n'.join(message) return True class Subset(SequenceComparison): """ A shortcut for :class:`SequenceComparison` that checks if the specified items are present in the sequence. """ name_attrs = () def __init__(self, *expected): super(Subset, self).__init__(*expected, ordered=False, partial=True) class Permutation(SequenceComparison): """ A shortcut for :class:`SequenceComparison` that checks if the set of items in the sequence is as expected, but without checking ordering. """ def __init__(self, *expected): super(Permutation, self).__init__(*expected, ordered=False, partial=False) class MappingComparison(StatefulComparison): """ An object that can be used in comparisons of expected and actual mappings. 
:param expected_mapping: The mapping that should be matched expressed as either a sequence of ``(key, value)`` tuples or a mapping. :param expected_items: The items that should be matched. :param ordered: If the keys in the mapping are expected to be in the order specified. Defaults to ``False``. :param partial: If any keys not expected should be ignored. Defaults to ``False``. :param recursive: If a difference is found, recursively compare the value where the difference was found to highlight exactly what was different. Defaults to ``False``. """ name_attrs = ('ordered', 'partial') def __init__(self, *expected_mapping, **expected_items): # py2 :-( self.ordered = expected_items.pop('ordered', False) self.partial = expected_items.pop('partial', False) self.recursive = expected_items.pop('recursive', False) if PY2 and self.ordered: if expected_items: raise TypeError('order undefined on Python 2') elif expected_mapping and type(expected_mapping[0]) is dict: raise TypeError('dict order undefined on Python 2') if len(expected_mapping) == 1: expected = OrderedDict(*expected_mapping) else: expected = OrderedDict(expected_mapping) expected.update(expected_items) self.expected = expected def body(self): # this can all go away and use the super class once py2 is gone :'( parts = [] text_length = 0 for key, value in self.expected.items(): part = repr(key)+': '+pformat(value) text_length += len(part) parts.append(part) if text_length > 60: sep = ',\n' else: sep = ', ' return sep.join(parts) def __ne__(self, other): try: actual_keys = other.keys() actual_mapping = dict(other.items()) except AttributeError: self.failed = 'bad type' return True expected_keys = self.expected.keys() expected_mapping = self.expected if self.partial: ignored_keys = set(actual_keys) - set(expected_keys) for key in ignored_keys: del actual_mapping[key] # preserve the order: actual_keys = [k for k in actual_keys if k not in ignored_keys] else: ignored_keys = None mapping_differences = compare( 
expected=expected_mapping, actual=actual_mapping, recursive=self.recursive, raises=False ) if self.ordered: key_differences = compare( expected=list(expected_keys), actual=list(actual_keys), recursive=self.recursive, raises=False ) else: key_differences = None if key_differences or mapping_differences: message = [] if ignored_keys: message.append('ignored:\n'+pformat(sorted(ignored_keys))) if mapping_differences: message.append(mapping_differences.split('\n\n', 1)[1]) if key_differences: message.append('wrong key order:\n\n'+key_differences.split('\n\n', 1)[1]) self.failed = '\n\n'.join(message) return True return False class StringComparison: """ An object that can be used in comparisons of expected and actual strings where the string expected matches a pattern rather than a specific concrete string. :param regex_source: A string containing the source for a regular expression that will be used whenever this :class:`StringComparison` is compared with any :class:`basestring` instance. :param flags: Flags passed to :func:`re.compile`. :param flag_names: See the :ref:`examples `. """ def __init__(self, regex_source, flags=None, **flag_names): args = [regex_source] flags_ = [] if flags: flags_.append(flags) flags_.extend(getattr(re, f.upper()) for f in flag_names) if flags_: args.append(reduce(__or__, flags_)) self.re = re.compile(*args) def __eq__(self, other): if not isinstance(other, basestring): return if self.re.match(other): return True return False def __ne__(self, other): return not self == other def __repr__(self): return '' % self.re.pattern def __lt__(self, other): return self.re.pattern < other def __gt__(self, other): return self.re.pattern > other class RoundComparison: """ An object that can be used in comparisons of expected and actual numerics to a specified precision. :param value: numeric to be compared. :param precision: Number of decimal places to round to in order to perform the comparison. 
""" def __init__(self, value, precision): self.rounded = round(value, precision) self.precision = precision def __eq__(self, other): other_rounded = round(other, self.precision) if type(self.rounded) is not type(other_rounded): raise TypeError('Cannot compare %r with %r' % (self, type(other))) return self.rounded == other_rounded def __ne__(self, other): return not self == other def __repr__(self): return '' % (self.rounded, self.precision) class RangeComparison: """ An object that can be used in comparisons of orderable types to check that a value specified within the given range. :param lower_bound: the inclusive lower bound for the acceptable range. :param upper_bound: the inclusive upper bound for the acceptable range. """ def __init__(self, lower_bound, upper_bound): self.lower_bound = lower_bound self.upper_bound = upper_bound def __eq__(self, other): return self.lower_bound <= other <= self.upper_bound def __ne__(self, other): return not self == other def __repr__(self): return '' % (self.lower_bound, self.upper_bound) testfixtures-6.18.3/testfixtures/compat.py000066400000000000000000000024271412502526400207760ustar00rootroot00000000000000# compatibility module for different python versions import sys PY_VERSION = sys.version_info[:2] PY_36_PLUS = PY_VERSION >= (3, 6) PY_37_PLUS = PY_VERSION >= (3, 7) if PY_VERSION > (3, 0): PY2 = False PY3 = True Bytes = bytes Unicode = str basestring = str BytesLiteral = lambda x: x.encode('latin1') UnicodeLiteral = lambda x: x class_type_name = 'class' ClassType = type exception_module = 'builtins' new_class = type self_name = '__self__' from io import StringIO xrange = range from itertools import zip_longest from functools import reduce from collections.abc import Iterable from abc import ABC else: PY2 = True PY3 = False Bytes = str Unicode = unicode basestring = basestring BytesLiteral = lambda x: x UnicodeLiteral = lambda x: x.decode('latin1') class_type_name = 'type' from types import ClassType exception_module = 
'exceptions' from new import classobj as new_class self_name = 'im_self' from StringIO import StringIO xrange = xrange from itertools import izip_longest as zip_longest reduce = reduce from collections import Iterable from abc import ABCMeta ABC = ABCMeta('ABC', (object,), {}) # compatible with Python 2 *and* 3 testfixtures-6.18.3/testfixtures/components.py000066400000000000000000000023521412502526400216750ustar00rootroot00000000000000""" Helpers for working with Zope and its components. """ import atexit import warnings from zope.component import getSiteManager from zope.interface.registry import Components class TestComponents: """ A helper for providing a sterile registry when testing with :mod:`zope.component`. Instantiation will install an empty registry that will be returned by :func:`zope.component.getSiteManager`. """ __test__ = False instances = set() atexit_setup = False def __init__(self): self.registry = Components('Testing') self.old = getSiteManager.sethook(lambda: self.registry) self.instances.add(self) if not self.__class__.atexit_setup: atexit.register(self.atexit) self.__class__.atexit_setup = True def uninstall(self): """ Remove the sterile registry and replace it with the one that was in place before this :class:`TestComponents` was instantiated. """ getSiteManager.sethook(self.old) self.instances.remove(self) @classmethod def atexit(cls): if cls.instances: warnings.warn( 'TestComponents instances not uninstalled by shutdown!' ) testfixtures-6.18.3/testfixtures/django.py000066400000000000000000000034241412502526400207530ustar00rootroot00000000000000from __future__ import absolute_import from functools import partial from django.db.models import Model from .comparison import _compare_mapping, register from . 
import compare as base_compare def instance_fields(instance): opts = instance._meta for name in ( 'concrete_fields', 'virtual_fields', 'private_fields', ): fields = getattr(opts, name, None) if fields: for field in fields: yield field def model_to_dict(instance, exclude, include_not_editable): data = {} for f in instance_fields(instance): if f.name in exclude: continue if not getattr(f, 'editable', False) and not include_not_editable: continue data[f.name] = f.value_from_object(instance) return data def compare_model(x, y, context): """ Returns an informative string describing the differences between the two supplied Django model instances. The way in which this comparison is performed can be controlled using the following parameters: :param ignore_fields: A sequence of fields to ignore during comparison, most commonly set to ``['id']``. By default, no fields are ignored. :param non_editable_fields: If `True`, then fields with ``editable=False`` will be included in the comparison. By default, these fields are ignored. """ ignore_fields = context.get_option('ignore_fields', set()) non_editable_fields= context.get_option('non_editable_fields', False) args = [] for obj in x, y: args.append(model_to_dict(obj, ignore_fields, non_editable_fields)) args.append(context) args.append(x) return _compare_mapping(*args) register(Model, compare_model) compare = partial(base_compare, ignore_eq=True) testfixtures-6.18.3/testfixtures/logcapture.py000066400000000000000000000247411412502526400216630ustar00rootroot00000000000000from collections import defaultdict import atexit import logging import warnings from pprint import pformat from .comparison import SequenceComparison, compare from .utils import wrap class LogCapture(logging.Handler): """ These are used to capture entries logged to the Python logging framework and make assertions about what was logged. :param names: A string (or tuple of strings) containing the dotted name(s) of loggers to capture. 
By default, the root logger is captured. :param install: If `True`, the :class:`LogCapture` will be installed as part of its instantiation. :param propagate: If specified, any captured loggers will have their `propagate` attribute set to the supplied value. This can be used to prevent propagation from a child logger to a parent logger that has configured handlers. :param attributes: The sequence of attribute names to return for each record or a callable that extracts a row from a record. If a sequence of attribute names, those attributes will be taken from the :class:`~logging.LogRecord`. If an attribute is callable, the value used will be the result of calling it. If an attribute is missing, ``None`` will be used in its place. If a callable, it will be called with the :class:`~logging.LogRecord` and the value returned will be used as the row.. :param recursive_check: If ``True``, log messages will be compared recursively by :meth:`LogCapture.check`. :param ensure_checks_above: The log level above which checks must be made for logged events. """ instances = set() atexit_setup = False installed = False default_ensure_checks_above = logging.NOTSET def __init__(self, names=None, install=True, level=1, propagate=None, attributes=('name', 'levelname', 'getMessage'), recursive_check=False, ensure_checks_above=None ): logging.Handler.__init__(self) if not isinstance(names, tuple): names = (names, ) self.names = names self.level = level self.propagate = propagate self.attributes = attributes self.recursive_check = recursive_check self.old = defaultdict(dict) #: The log level above which checks must be made for logged events. 
if ensure_checks_above is None: self.ensure_checks_above = self.default_ensure_checks_above else: self.ensure_checks_above = ensure_checks_above self.clear() # declares self.records: List[LogRecord] if install: self.install() @classmethod def atexit(cls): if cls.instances: warnings.warn( 'LogCapture instances not uninstalled by shutdown, ' 'loggers captured:\n' '%s' % ('\n'.join((str(i.names) for i in cls.instances))) ) def __len__(self): return len(self.records) def __getitem__(self, index): return self._actual_row(self.records[index]) def __contains__(self, what): for i, item in enumerate(self): if what == item: self.records[i].checked = True return True def clear(self): """Clear any entries that have been captured.""" self.records = [] def mark_all_checked(self): """ Mark all captured events as checked. This should be called if you have made assertions about logging other than through :class:`LogCapture` methods. """ for record in self.records: record.checked = True def ensure_checked(self, level=None): """ Ensure every entry logged above the specified `level` has been checked. Raises an :class:`AssertionError` if this is not the case. :param level: the logging level, defaults to :attr:`ensure_checks_above`. :type level: Optional[int] """ if level is None: level = self.ensure_checks_above if level == logging.NOTSET: return un_checked = [] for record in self.records: if record.levelno >= level and not record.checked: un_checked.append(self._actual_row(record)) if un_checked: raise AssertionError(( 'Not asserted ERROR log(s): %s' ) % (pformat(un_checked))) def emit(self, record): # record: logging.LogRecord record.checked = False self.records.append(record) def install(self): """ Install this :class:`LogHandler` into the Python logging framework for the named loggers. This will remove any existing handlers for those loggers and drop their level to that specified on this :class:`LogCapture` in order to capture all logging. 
""" for name in self.names: logger = logging.getLogger(name) self.old['levels'][name] = logger.level self.old['filters'][name] = logger.filters self.old['handlers'][name] = logger.handlers self.old['disabled'][name] = logger.disabled self.old['propagate'][name] = logger.propagate logger.setLevel(self.level) logger.filters = [] logger.handlers = [self] logger.disabled = False if self.propagate is not None: logger.propagate = self.propagate self.instances.add(self) if not self.__class__.atexit_setup: atexit.register(self.atexit) self.__class__.atexit_setup = True def uninstall(self): """ Un-install this :class:`LogHandler` from the Python logging framework for the named loggers. This will re-instate any existing handlers for those loggers that were removed during installation and restore their level that prior to installation. """ if self in self.instances: for name in self.names: logger = logging.getLogger(name) logger.setLevel(self.old['levels'][name]) logger.filters = self.old['filters'][name] logger.handlers = self.old['handlers'][name] logger.disabled = self.old['disabled'][name] logger.propagate = self.old['propagate'][name] self.instances.remove(self) @classmethod def uninstall_all(cls): "This will uninstall all existing :class:`LogHandler` objects." for i in tuple(cls.instances): i.uninstall() def _actual_row(self, record): # Convert a log record to a Tuple or attribute value according the attributes member. # record: logging.LogRecord if callable(self.attributes): return self.attributes(record) else: values = [] for a in self.attributes: value = getattr(record, a, None) if callable(value): value = value() values.append(value) if len(values) == 1: return values[0] else: return tuple(values) def actual(self): """ The sequence of actual records logged, having had their attributes extracted as specified by the ``attributes`` parameter to the :class:`LogCapture` constructor. This can be useful for making more complex assertions about logged records. 
The actual records logged can also be inspected by using the :attr:`records` attribute. :rtype: List """ actual = [] for r in self.records: actual.append(self._actual_row(r)) return actual def __str__(self): if not self.records: return 'No logging captured' return '\n'.join(["%s %s\n %s" % r for r in self.actual()]) def check(self, *expected): """ This will compare the captured entries with the expected entries provided and raise an :class:`AssertionError` if they do not match. :param expected: A sequence of entries of the structure specified by the ``attributes`` passed to the constructor. """ result = compare( expected, actual=self.actual(), recursive=self.recursive_check ) self.mark_all_checked() return result def check_present(self, *expected, **kw): """ This will check if the captured entries contain all of the expected entries provided and raise an :class:`AssertionError` if not. This will ignore entries that have been captured but that do not match those in ``expected``. :param expected: A sequence of entries of the structure specified by the ``attributes`` passed to the constructor. :param order_matters: A keyword-only parameter that controls whether the order of the captured entries is required to match those of the expected entries. Defaults to ``True``. 
""" order_matters = kw.pop('order_matters', True) assert not kw, 'order_matters is the only keyword parameter' actual = self.actual() expected = SequenceComparison( *expected, ordered=order_matters, partial=True, recursive=self.recursive_check ) if expected != actual: raise AssertionError(expected.failed) for index in expected.checked_indices: self.records[index].checked = True def __enter__(self): return self def __exit__(self, type, value, traceback): self.uninstall() self.ensure_checked() class LogCaptureForDecorator(LogCapture): def install(self): LogCapture.install(self) self.clear() return self def log_capture(*names, **kw): """ A decorator for making a :class:`LogCapture` installed an available for the duration of a test function. :param names: An optional sequence of names specifying the loggers to be captured. If not specified, the root logger will be captured. Keyword parameters other than ``install`` may also be supplied and will be passed on to the :class:`LogCapture` constructor. """ l = LogCaptureForDecorator(names or None, install=False, **kw) return wrap(l.install, l.uninstall) testfixtures-6.18.3/testfixtures/mock.py000066400000000000000000000056711412502526400204500ustar00rootroot00000000000000""" A facade for either :mod:`unittest.mock` or its `rolling backport`__, if it is installed, with a preference for the latter as it may well have newer functionality and bugfixes. The facade also contains any bugfixes that are critical to the operation of functionality provided by testfixtures. 
__ https://mock.readthedocs.io """ from __future__ import absolute_import import sys try: from mock import * from mock.mock import _Call from mock.mock import call as mock_call from mock.mock import version_info as backport_version except ImportError: backport_version = None class MockCall: pass mock_call = MockCall() try: from unittest.mock import * from unittest.mock import _Call except ImportError: # pragma: no cover pass try: from unittest.mock import call as unittest_mock_call except ImportError: class UnittestMockCall: pass unittest_mock_call = UnittestMockCall() def __eq__(self, other): if other is ANY: return True try: len_other = len(other) except TypeError: return False self_name = '' if len(self) == 2: self_args, self_kwargs = self else: self_name, self_args, self_kwargs = self if (getattr(self, 'parent', None) and getattr(other, 'parent', None) and self.parent != other.parent): return False other_name = '' if len_other == 0: other_args, other_kwargs = (), {} elif len_other == 3: other_name, other_args, other_kwargs = other elif len_other == 1: value, = other if isinstance(value, tuple): other_args = value other_kwargs = {} elif isinstance(value, str): other_name = value other_args, other_kwargs = (), {} else: other_args = () other_kwargs = value elif len_other == 2: # could be (name, args) or (name, kwargs) or (args, kwargs) first, second = other if isinstance(first, str): other_name = first if isinstance(second, tuple): other_args, other_kwargs = second, {} else: other_args, other_kwargs = (), second else: other_args, other_kwargs = first, second else: return False if self_name and other_name != self_name: return False # this order is important for ANY to work! 
return (other_args, other_kwargs) == (self_args, self_kwargs) has_backport = backport_version is not None has_unittest_mock = sys.version_info >= (3, 3, 0) if ( (has_backport and backport_version[:3] > (2, 0, 0)) or (3, 6, 7) < sys.version_info[:3] < (3, 7, 0) or sys.version_info[:3] > (3, 7, 1) ): parent_name = '_mock_parent' elif has_unittest_mock or has_backport: _Call.__eq__ = __eq__ parent_name = 'parent' else: # pragma: no cover - only hit during testing of packaging. parent_name = None testfixtures-6.18.3/testfixtures/outputcapture.py000066400000000000000000000114161412502526400224350ustar00rootroot00000000000000import os import sys from tempfile import TemporaryFile from testfixtures.comparison import compare from testfixtures.compat import StringIO, Unicode class OutputCapture(object): """ A context manager for capturing output to the :attr:`sys.stdout` and :attr:`sys.stderr` streams. :param separate: If ``True``, ``stdout`` and ``stderr`` will be captured separately and their expected values must be passed to :meth:`~OutputCapture.compare`. :param fd: If ``True``, the underlying file descriptors will be captured, rather than just the attributes on :mod:`sys`. This allows you to capture things like subprocesses that write directly to the file descriptors, but is more invasive, so only use it when you need it. :param strip_whitespace: When ``True``, which is the default, leading and training whitespace is trimmed from both the expected and actual values when comparing. .. note:: If ``separate`` is passed as ``True``, :attr:`OutputCapture.captured` will be an empty string. 
""" original_stdout = None original_stderr = None def __init__(self, separate=False, fd=False, strip_whitespace=True): self.separate = separate self.fd = fd self.strip_whitespace = strip_whitespace def __enter__(self): if self.fd: self.output = TemporaryFile() self.stdout = TemporaryFile() self.stderr = TemporaryFile() else: self.output = StringIO() self.stdout = StringIO() self.stderr = StringIO() self.enable() return self def __exit__(self, *args): self.disable() def disable(self): "Disable the output capture if it is enabled." if self.fd: for original, current in ( (self.original_stdout, sys.stdout), (self.original_stderr, sys.stderr), ): os.dup2(original, current.fileno()) os.close(original) else: sys.stdout = self.original_stdout sys.stderr = self.original_stderr def enable(self): "Enable the output capture if it is disabled." if self.original_stdout is None: if self.fd: self.original_stdout = os.dup(sys.stdout.fileno()) self.original_stderr = os.dup(sys.stderr.fileno()) else: self.original_stdout = sys.stdout self.original_stderr = sys.stderr if self.separate: if self.fd: os.dup2(self.stdout.fileno(), sys.stdout.fileno()) os.dup2(self.stderr.fileno(), sys.stderr.fileno()) else: sys.stdout = self.stdout sys.stderr = self.stderr else: if self.fd: os.dup2(self.output.fileno(), sys.stdout.fileno()) os.dup2(self.output.fileno(), sys.stderr.fileno()) else: sys.stdout = sys.stderr = self.output def _read(self, stream): if self.fd: stream.seek(0) return stream.read() else: return stream.getvalue() @property def captured(self): "A property containing any output that has been captured so far." return self._read(self.output) def compare(self, expected=u'', stdout=u'', stderr=u''): """ Compare the captured output to that expected. If the output is not the same, an :class:`AssertionError` will be raised. :param expected: A string containing the expected combined output of ``stdout`` and ``stderr``. :param stdout: A string containing the expected output to ``stdout``. 
:param stderr: A string containing the expected output to ``stderr``. """ expected_mapping = {} actual_mapping = {} for prefix, _expected, captured in ( ('captured', expected, self.captured), ('stdout', stdout, self._read(self.stdout)), ('stderr', stderr, self._read(self.stderr)), ): if self.fd and isinstance(_expected, Unicode): _expected = _expected.encode() if self.strip_whitespace: _expected = _expected.strip() captured = captured.strip() if _expected != captured: expected_mapping[prefix] = _expected actual_mapping[prefix] = captured if len(expected_mapping) == 1: compare(expected=tuple(expected_mapping.values())[0], actual=tuple(actual_mapping.values())[0]) compare(expected=expected_mapping, actual=actual_mapping) testfixtures-6.18.3/testfixtures/popen.py000066400000000000000000000232431412502526400206330ustar00rootroot00000000000000import pipes from functools import wraps, partial from io import TextIOWrapper from itertools import chain from subprocess import STDOUT, PIPE from tempfile import TemporaryFile from testfixtures.compat import basestring, PY3, zip_longest, reduce, PY2 from testfixtures.utils import extend_docstring from .mock import Mock, call def shell_join(command): if not isinstance(command, basestring): command = " ".join(pipes.quote(part) for part in command) return command class PopenBehaviour(object): """ An object representing the behaviour of a :class:`MockPopen` when simulating a particular command. """ def __init__(self, stdout=b'', stderr=b'', returncode=0, pid=1234, poll_count=3): self.stdout = stdout self.stderr = stderr self.returncode = returncode self.pid = pid self.poll_count = poll_count def record(func): @wraps(func) def recorder(self, *args, **kw): self._record((func.__name__,), *args, **kw) return func(self, *args, **kw) return recorder class MockPopenInstance(object): """ A mock process as returned by :class:`MockPopen`. """ #: A :class:`~unittest.mock.Mock` representing the pipe into this process. 
#: This is only set if ``stdin=PIPE`` is passed the constructor. #: The mock records writes and closes in :attr:`MockPopen.all_calls`. stdin = None #: A file representing standard output from this process. stdout = None #: A file representing error output from this process. stderr = None def __init__(self, mock_class, root_call, args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0, restore_signals=True, start_new_session=False, pass_fds=(), encoding=None, errors=None, text=None): self.mock = Mock() self.class_instance_mock = mock_class.mock.Popen_instance #: A :func:`unittest.mock.call` representing the call made to instantiate #: this mock process. self.root_call = root_call #: The calls made on this mock process, represented using #: :func:`~unittest.mock.call` instances. self.calls = [] self.all_calls = mock_class.all_calls cmd = shell_join(args) behaviour = mock_class.commands.get(cmd, mock_class.default_behaviour) if behaviour is None: raise KeyError('Nothing specified for command %r' % cmd) if callable(behaviour): behaviour = behaviour(command=cmd, stdin=stdin) self.behaviour = behaviour stdout_value = behaviour.stdout stderr_value = behaviour.stderr if stderr == STDOUT: line_iterator = chain.from_iterable(zip_longest( stdout_value.splitlines(True), stderr_value.splitlines(True) )) stdout_value = b''.join(l for l in line_iterator if l) stderr_value = None self.poll_count = behaviour.poll_count for name, option, mock_value in ( ('stdout', stdout, stdout_value), ('stderr', stderr, stderr_value) ): value = None if option is PIPE: value = TemporaryFile() value.write(mock_value) value.flush() value.seek(0) if PY3 and (universal_newlines or text or encoding): value = TextIOWrapper(value, encoding=encoding, errors=errors) setattr(self, name, value) if stdin == PIPE: self.stdin = Mock() for method in 'write', 'close': 
record_writes = partial(self._record, ('stdin', method)) getattr(self.stdin, method).side_effect = record_writes self.pid = behaviour.pid #: The return code of this mock process. self.returncode = None if PY3: self.args = args def _record(self, names, *args, **kw): for mock in self.class_instance_mock, self.mock: reduce(getattr, names, mock)(*args, **kw) for base_call, store in ( (call, self.calls), (self.root_call, self.all_calls) ): store.append(reduce(getattr, names, base_call)(*args, **kw)) if PY3: def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.wait() for stream in self.stdout, self.stderr: if stream: stream.close() @record def wait(self, timeout=None): "Simulate calls to :meth:`subprocess.Popen.wait`" self.returncode = self.behaviour.returncode return self.returncode @record def communicate(self, input=None, timeout=None): "Simulate calls to :meth:`subprocess.Popen.communicate`" self.returncode = self.behaviour.returncode return (self.stdout and self.stdout.read(), self.stderr and self.stderr.read()) else: @record def wait(self): "Simulate calls to :meth:`subprocess.Popen.wait`" self.returncode = self.behaviour.returncode return self.returncode @record def communicate(self, input=None): "Simulate calls to :meth:`subprocess.Popen.communicate`" self.returncode = self.behaviour.returncode return (self.stdout and self.stdout.read(), self.stderr and self.stderr.read()) @record def poll(self): "Simulate calls to :meth:`subprocess.Popen.poll`" while self.poll_count and self.returncode is None: self.poll_count -= 1 return None # This call to wait() is NOT how poll() behaves in reality. # poll() NEVER sets the returncode. # The returncode is *only* ever set by process completion. # The following is an artifact of the fixture's implementation. 
self.returncode = self.behaviour.returncode return self.returncode @record def send_signal(self, signal): "Simulate calls to :meth:`subprocess.Popen.send_signal`" pass @record def terminate(self): "Simulate calls to :meth:`subprocess.Popen.terminate`" pass @record def kill(self): "Simulate calls to :meth:`subprocess.Popen.kill`" pass class MockPopen(object): """ A specialised mock for testing use of :class:`subprocess.Popen`. An instance of this class can be used in place of the :class:`subprocess.Popen` and is often inserted where it's needed using :func:`unittest.mock.patch` or a :class:`~testfixtures.Replacer`. """ default_behaviour = None def __init__(self): self.commands = {} self.mock = Mock() #: All calls made using this mock and the objects it returns, represented using #: :func:`~unittest.mock.call` instances. self.all_calls = [] def _resolve_behaviour(self, stdout, stderr, returncode, pid, poll_count, behaviour): if behaviour is None: return PopenBehaviour( stdout, stderr, returncode, pid, poll_count ) else: return behaviour def set_command(self, command, stdout=b'', stderr=b'', returncode=0, pid=1234, poll_count=3, behaviour=None): """ Set the behaviour of this mock when it is used to simulate the specified command. :param command: A string representing the command to be simulated. """ self.commands[shell_join(command)] = self._resolve_behaviour( stdout, stderr, returncode, pid, poll_count, behaviour ) def set_default(self, stdout=b'', stderr=b'', returncode=0, pid=1234, poll_count=3, behaviour=None): """ Set the behaviour of this mock when it is used to simulate commands that have no explicit behavior specified using :meth:`~MockPopen.set_command` or :meth:`~MockPopen.set_callable`. 
""" self.default_behaviour = self._resolve_behaviour( stdout, stderr, returncode, pid, poll_count, behaviour ) def __call__(self, *args, **kw): self.mock.Popen(*args, **kw) root_call = call.Popen(*args, **kw) self.all_calls.append(root_call) return MockPopenInstance(self, root_call, *args, **kw) set_command_params = """ :param stdout: A string representing the simulated content written by the process to the stdout pipe. :param stderr: A string representing the simulated content written by the process to the stderr pipe. :param returncode: An integer representing the return code of the simulated process. :param pid: An integer representing the process identifier of the simulated process. This is useful if you have code the prints out the pids of running processes. :param poll_count: Specifies the number of times :meth:`MockPopen.poll` can be called before :attr:`MockPopen.returncode` is set and returned by :meth:`MockPopen.poll`. If supplied, ``behaviour`` must be either a :class:`PopenBehaviour` instance or a callable that takes the ``command`` string representing the command to be simulated and the ``stdin`` for that command and returns a :class:`PopenBehaviour` instance. """ # add the param docs, so we only have one copy of them! extend_docstring(set_command_params, [MockPopen.set_command, MockPopen.set_default]) testfixtures-6.18.3/testfixtures/replace.py000066400000000000000000000110321412502526400211160ustar00rootroot00000000000000from functools import partial from testfixtures.compat import ClassType from testfixtures.resolve import resolve, not_there from testfixtures.utils import wrap, extend_docstring import warnings def not_same_descriptor(x, y, descriptor): return isinstance(x, descriptor) and not isinstance(y, descriptor) class Replacer: """ These are used to manage the mocking out of objects so that units of code can be tested without having to rely on their normal dependencies. 
""" def __init__(self): self.originals = {} def _replace(self, container, name, method, value, strict=True): if value is not_there: if method == 'a': try: delattr(container, name) except AttributeError: pass if method == 'i': try: del container[name] except KeyError: pass else: if method == 'a': setattr(container, name, value) if method == 'i': container[name] = value def __call__(self, target, replacement, strict=True): """ Replace the specified target with the supplied replacement. """ container, method, attribute, t_obj = resolve(target) if method is None: raise ValueError('target must contain at least one dot!') if t_obj is not_there and strict: raise AttributeError('Original %r not found' % attribute) replacement_to_use = replacement if isinstance(container, (type, ClassType)): if not_same_descriptor(t_obj, replacement, classmethod): replacement_to_use = classmethod(replacement) elif not_same_descriptor(t_obj, replacement, staticmethod): replacement_to_use = staticmethod(replacement) self._replace(container, attribute, method, replacement_to_use, strict) if target not in self.originals: self.originals[target] = t_obj return replacement def replace(self, target, replacement, strict=True): """ Replace the specified target with the supplied replacement. """ self(target, replacement, strict) def restore(self): """ Restore all the original objects that have been replaced by calls to the :meth:`replace` method of this :class:`Replacer`. 
""" for target, original in tuple(self.originals.items()): container, method, attribute, found = resolve(target) self._replace(container, attribute, method, original, strict=False) del self.originals[target] def __enter__(self): return self def __exit__(self, type, value, traceback): self.restore() def __del__(self): if self.originals: # no idea why coverage misses the following statement # it's covered by test_replace.TestReplace.test_replacer_del warnings.warn( # pragma: no cover 'Replacer deleted without being restored, ' 'originals left: %r' % self.originals ) def replace(target, replacement, strict=True): """ A decorator to replace a target object for the duration of a test function. """ r = Replacer() return wrap(partial(r.__call__, target, replacement, strict), r.restore) class Replace(object): """ A context manager that uses a :class:`Replacer` to replace a single target. """ def __init__(self, target, replacement, strict=True): self.target = target self.replacement = replacement self.strict = strict self._replacer = Replacer() def __enter__(self): return self._replacer(self.target, self.replacement, self.strict) def __exit__(self, exc_type, exc_val, exc_tb): self._replacer.restore() replace_params_doc = """ :param target: A string containing the dotted-path to the object to be replaced. This path may specify a module in a package, an attribute of a module, or any attribute of something contained within a module. :param replacement: The object to use as a replacement. :param strict: When `True`, an exception will be raised if an attempt is made to replace an object that does not exist. """ # add the param docs, so we only have one copy of them! 
extend_docstring(replace_params_doc, [Replacer.__call__, Replacer.replace, replace, Replace]) testfixtures-6.18.3/testfixtures/resolve.py000066400000000000000000000025001412502526400211620ustar00rootroot00000000000000from testfixtures import not_there def resolve(dotted_name): names = dotted_name.split('.') used = names.pop(0) found = __import__(used) container = found method = None n = None for n in names: container = found used += '.' + n try: found = found.__dict__[n] method = 'a' except (AttributeError, KeyError): try: found = getattr(found, n) method = 'a' # pragma: no branch except AttributeError: try: __import__(used) except ImportError: method = 'i' try: found = found[n] # pragma: no branch except KeyError: found = not_there # pragma: no branch except TypeError: try: n = int(n) except ValueError: method = 'a' found = not_there else: found = found[n] # pragma: no branch else: found = getattr(found, n) method = 'a' # pragma: no branch return container, method, n, found testfixtures-6.18.3/testfixtures/rmtree.py000066400000000000000000000050301412502526400210020ustar00rootroot00000000000000# lamosity needed to make things reliable on Windows :-( # (borrowed from Python's test_support.py) import errno import os import shutil import sys import time import warnings if sys.platform.startswith("win"): # pragma: no cover def _waitfor(func, pathname, waitall=False): # Perform the operation func(pathname) # Now setup the wait loop if waitall: dirname = pathname else: dirname, name = os.path.split(pathname) dirname = dirname or '.' # Check for `pathname` to be removed from the filesystem. # The exponential backoff of the timeout amounts to a total # of ~1 second after which the deletion is probably an error # anyway. # Testing on a i7@4.3GHz shows that usually only 1 iteration is # required when contention occurs. 
timeout = 0.001 while timeout < 1.0: # pragma: no branch # Note we are only testing for the existence of the file(s) in # the contents of the directory regardless of any security or # access rights. If we have made it this far, we have sufficient # permissions to do that much using Python's equivalent of the # Windows API FindFirstFile. # Other Windows APIs can fail or give incorrect results when # dealing with files that are pending deletion. L = os.listdir(dirname) if not (L if waitall else name in L): # pragma: no branch return # Increase the timeout and try again time.sleep(timeout) # pragma: no cover timeout *= 2 # pragma: no cover warnings.warn('tests may fail, delete still pending for ' + pathname, # pragma: no cover RuntimeWarning, stacklevel=4) def _rmtree(path): def _rmtree_inner(path): for name in os.listdir(path): fullname = os.path.join(path, name) if os.path.isdir(fullname): _waitfor(_rmtree_inner, fullname, waitall=True) os.rmdir(fullname) else: os.unlink(fullname) _waitfor(_rmtree_inner, path, waitall=True) _waitfor(os.rmdir, path) else: _rmtree = shutil.rmtree def rmtree(path): try: _rmtree(path) except OSError as e: # pragma: no cover # Unix returns ENOENT, Windows returns ESRCH. if e.errno not in (errno.ENOENT, errno.ESRCH): # pragma: no branch raise testfixtures-6.18.3/testfixtures/shouldraise.py000066400000000000000000000067121412502526400220360ustar00rootroot00000000000000from contextlib import contextmanager from functools import wraps from testfixtures import diff, compare from .compat import ClassType param_docs = """ :param exception: This can be one of the following: * `None`, indicating that an exception must be raised, but the type is unimportant. * An exception class, indicating that the type of the exception is important but not the parameters it is created with. * An exception instance, indicating that an exception exactly matching the one supplied should be raised. 
:param unless: Can be passed a boolean that, when ``True`` indicates that no exception is expected. This is useful when checking that exceptions are only raised on certain versions of Python. """ class ShouldRaise(object): __doc__ = """ This context manager is used to assert that an exception is raised within the context it is managing. """ + param_docs #: The exception captured by the context manager. #: Can be used to inspect specific attributes of the exception. raised = None def __init__(self, exception=None, unless=False): self.exception = exception self.expected = not unless def __enter__(self): return self def __exit__(self, type_, actual, traceback): __tracebackhide__ = True self.raised = actual if self.expected: if self.exception: if actual is not None: if isinstance(self.exception, (ClassType, type)): actual = type(actual) if self.exception is not actual: return False else: if type(self.exception) is not type(actual): return False compare(self.exception, actual, x_label='expected', y_label='raised') elif not actual: raise AssertionError('No exception raised!') elif actual: return False return True class should_raise: __doc__ = """ A decorator to assert that the decorated function will raised an exception. An exception class or exception instance may be passed to check more specifically exactly what exception will be raised. """ + param_docs def __init__(self, exception=None, unless=None): self.exception = exception self.unless = unless def __call__(self, target): @wraps(target) def _should_raise_wrapper(*args, **kw): with ShouldRaise(self.exception, self.unless): target(*args, **kw) return _should_raise_wrapper @contextmanager def ShouldAssert(expected_text): """ A context manager to check that an :class:`AssertionError` is raised and its text is as expected. 
""" try: yield except AssertionError as e: actual_text = str(e) if expected_text != actual_text: raise AssertionError(diff(expected_text, actual_text, x_label='expected', y_label='actual')) else: raise AssertionError('Expected AssertionError(%r), None raised!' % expected_text) testfixtures-6.18.3/testfixtures/shouldwarn.py000066400000000000000000000042001412502526400216700ustar00rootroot00000000000000import warnings from testfixtures import Comparison as C, compare class ShouldWarn(warnings.catch_warnings): """ This context manager is used to assert that warnings are issued within the context it is managing. :param expected: This should be a sequence made up of one or more elements, each of one of the following types: * A warning class, indicating that the type of the warnings is important but not the parameters it is created with. * A warning instance, indicating that a warning exactly matching the one supplied should have been issued. If no expected warnings are passed, you will need to inspect the contents of the list returned by the context manager. :param filters: If passed, these are used to create a filter such that only warnings you are interested in will be considered by this :class:`ShouldWarn` instance. The names and meanings are the same as the parameters for :func:`warnings.filterwarnings`. 
""" _empty_okay = False def __init__(self, *expected, **filters): super(ShouldWarn, self).__init__(record=True) self.expected = [C(e) for e in expected] self.filters = filters def __enter__(self): self.recorded = super(ShouldWarn, self).__enter__() warnings.filterwarnings("always", **self.filters) return self.recorded def __exit__(self, exc_type, exc_val, exc_tb): super(ShouldWarn, self).__exit__(exc_type, exc_val, exc_tb) if not self.recorded and self._empty_okay: return if not self.expected and self.recorded and not self._empty_okay: return compare(self.expected, actual=[wm.message for wm in self.recorded]) class ShouldNotWarn(ShouldWarn): """ This context manager is used to assert that no warnings are issued within the context it is managing. """ _empty_okay = True def __init__(self): super(ShouldNotWarn, self).__init__() testfixtures-6.18.3/testfixtures/sybil.py000066400000000000000000000043621412502526400206350ustar00rootroot00000000000000from __future__ import absolute_import import os import re import textwrap from sybil import Region from testfixtures import diff FILEBLOCK_START = re.compile(r'^\.\.\s*topic::?\s*(.+)\b', re.MULTILINE) FILEBLOCK_END = re.compile(r'(\n\Z|\n(?=\S))') CLASS = re.compile(r'\s+:class:\s*(read|write)-file') class FileBlock(object): def __init__(self, path, content, action): self.path, self.content, self.action = path, content, action class FileParser(object): """ A `Sybil `__ parser that parses certain ReST sections to read and write files in the configured :class:`TempDirectory`. :param name: This is the name of the :class:`TempDirectory` to use in the Sybil test namespace. 
""" def __init__(self, name): self.name = name def __call__(self, document): for start_match, end_match, source in document.find_region_sources( FILEBLOCK_START, FILEBLOCK_END ): lines = source.splitlines() class_ = CLASS.match(lines[1]) if not class_: continue index = 3 if lines[index].strip() == '::': index += 1 source = textwrap.dedent('\n'.join(lines[index:])).lstrip() if source[-1] != '\n': source += '\n' parsed = FileBlock( path=start_match.group(1), content=source, action=class_.group(1) ) yield Region( start_match.start(), end_match.end(), parsed, self.evaluate ) def evaluate(self, example): block = example.parsed dir = example.namespace[self.name] if block.action == 'read': actual = dir.read(block.path, 'ascii').replace(os.linesep, '\n') if actual != block.content: return diff( block.content, actual, 'File %r, line %i:' % (example.path, example.line), 'Reading from "%s":' % dir.getpath(block.path) ) if block.action == 'write': dir.write(block.path, block.content, 'ascii') testfixtures-6.18.3/testfixtures/tdatetime.py000066400000000000000000000120701412502526400214660ustar00rootroot00000000000000from calendar import timegm from datetime import datetime, timedelta, date from testfixtures.compat import new_class @classmethod def add(cls, *args, **kw): if 'tzinfo' in kw or len(args) > 7: raise TypeError('Cannot add using tzinfo on %s' % cls.__name__) if args and isinstance(args[0], cls.__bases__[0]): inst = args[0] tzinfo = getattr(inst, 'tzinfo', None) if tzinfo: if tzinfo != cls._tzta: raise ValueError( 'Cannot add %s with tzinfo of %s as configured to use %s' % ( inst.__class__.__name__, tzinfo, cls._tzta )) inst = inst.replace(tzinfo=None) if cls._ct: inst = cls._ct(inst) cls._q.append(inst) else: cls._q.append(cls(*args, **kw)) @classmethod def set_(cls, *args, **kw): if cls._q: cls._q = [] cls.add(*args, **kw) @classmethod def tick(cls, *args, **kw): if kw: delta = timedelta(**kw) else: delta, = args cls._q[-1] += delta def __add__(self, other): r = 
super(self.__class__, self).__add__(other) if self._ct: r = self._ct(r) return r def __new__(cls, *args, **kw): if cls is cls._cls: return super(cls, cls).__new__(cls, *args, **kw) else: return cls._cls(*args, **kw) @classmethod def instantiate(cls): r = cls._q.pop(0) if not cls._q: cls._gap += cls._gap_d n = r + timedelta(**{cls._gap_t: cls._gap}) if cls._ct: n = cls._ct(n) cls._q.append(n) return r @classmethod def now(cls, tz=None): r = cls._instantiate() if tz is not None: if cls._tzta: r = r - cls._tzta.utcoffset(r) r = tz.fromutc(r.replace(tzinfo=tz)) return cls._ct(r) @classmethod def utcnow(cls): r = cls._instantiate() if cls._tzta is not None: r = r - cls._tzta.utcoffset(r) return r def test_factory(n, type, default, args, kw, tz=None, **to_patch): q = [] to_patch['_q'] = q to_patch['_tzta'] = tz to_patch['add'] = add to_patch['set'] = set_ to_patch['tick'] = tick to_patch['__add__'] = __add__ if '__new__' not in to_patch: to_patch['__new__'] = __new__ class_ = new_class(n, (type, ), to_patch) strict = kw.pop('strict', False) if strict: class_._cls = class_ else: class_._cls = type if args != (None, ): if not (args or kw): args = default class_.add(*args, **kw) return class_ def correct_date_method(self): return self._date_type( self.year, self.month, self.day ) @classmethod def correct_datetime(cls, dt): return cls._cls( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, dt.tzinfo, ) def test_datetime(*args, **kw): if len(args) > 7: tz = args[7] args = args[:7] else: tz = kw.pop('tzinfo', getattr(args[0], 'tzinfo', None) if args else None) if 'delta' in kw: gap = kw.pop('delta') gap_delta = 0 else: gap = 0 gap_delta = 10 delta_type = kw.pop('delta_type', 'seconds') date_type = kw.pop('date_type', date) return test_factory( 'tdatetime', datetime, (2001, 1, 1, 0, 0, 0), args, kw, tz, _ct=correct_datetime, _instantiate=instantiate, now=now, utcnow=utcnow, _gap=gap, _gap_d=gap_delta, _gap_t=delta_type, date=correct_date_method, 
_date_type=date_type, ) test_datetime.__test__ = False @classmethod def correct_date(cls, d): return cls._cls( d.year, d.month, d.day, ) def test_date(*args, **kw): if 'delta' in kw: gap = kw.pop('delta') gap_delta = 0 else: gap = 0 gap_delta = 1 delta_type = kw.pop('delta_type', 'days') return test_factory( 'tdate', date, (2001, 1, 1), args, kw, _ct=correct_date, today=instantiate, _gap=gap, _gap_d=gap_delta, _gap_t=delta_type, ) ms = 10**6 def __time_new__(cls, *args, **kw): if args or kw: return super(cls, cls).__new__(cls, *args, **kw) else: val = cls.instantiate() t = timegm(val.utctimetuple()) t += (float(val.microsecond)/ms) return t test_date.__test__ = False def test_time(*args, **kw): if 'tzinfo' in kw or len(args) > 7 or (args and getattr(args[0], 'tzinfo', None)): raise TypeError("You don't want to use tzinfo with test_time") if 'delta' in kw: gap = kw.pop('delta') gap_delta = 0 else: gap = 0 gap_delta = 1 delta_type = kw.pop('delta_type', 'seconds') return test_factory( 'ttime', datetime, (2001, 1, 1, 0, 0, 0), args, kw, _ct=None, instantiate=instantiate, _gap=gap, _gap_d=gap_delta, _gap_t=delta_type, __new__=__time_new__, ) test_time.__test__ = False testfixtures-6.18.3/testfixtures/tempdirectory.py000066400000000000000000000334261412502526400224100ustar00rootroot00000000000000import atexit import os import warnings from re import compile from tempfile import mkdtemp from testfixtures.comparison import compare from testfixtures.compat import basestring from testfixtures.utils import wrap from .rmtree import rmtree class TempDirectory: """ A class representing a temporary directory on disk. :param ignore: A sequence of strings containing regular expression patterns that match filenames that should be ignored by the :class:`TempDirectory` listing and checking methods. :param create: If `True`, the temporary directory will be created as part of class instantiation. 
:param path: If passed, this should be a string containing a physical path to use as the temporary directory. When passed, :class:`TempDirectory` will not create a new directory to use. :param encoding: A default encoding to use for :meth:`read` and :meth:`write` operations when the ``encoding`` parameter is not passed to those methods. """ instances = set() atexit_setup = False #: The physical path of the :class:`TempDirectory` on disk path = None def __init__(self, ignore=(), create=True, path=None, encoding=None): self.ignore = [] for regex in ignore: self.ignore.append(compile(regex)) self.path = path self.encoding = encoding self.dont_remove = bool(path) if create: self.create() @classmethod def atexit(cls): if cls.instances: warnings.warn( 'TempDirectory instances not cleaned up by shutdown:\n' '%s' % ('\n'.join(i.path for i in cls.instances)) ) def create(self): """ Create a temporary directory for this instance to use if one has not already been created. """ if self.path: return self self.path = mkdtemp() self.instances.add(self) if not self.__class__.atexit_setup: atexit.register(self.atexit) self.__class__.atexit_setup = True return self def cleanup(self): """ Delete the temporary directory and anything in it. This :class:`TempDirectory` cannot be used again unless :meth:`create` is called. """ if self.path and os.path.exists(self.path) and not self.dont_remove: rmtree(self.path) del self.path if self in self.instances: self.instances.remove(self) @classmethod def cleanup_all(cls): """ Delete all temporary directories associated with all :class:`TempDirectory` objects. 
""" for i in tuple(cls.instances): i.cleanup() def actual(self, path=None, recursive=False, files_only=False, followlinks=False): path = self._join(path) if path else self.path result = [] if recursive: for dirpath, dirnames, filenames in os.walk( path, followlinks=followlinks ): dirpath = '/'.join(dirpath[len(path)+1:].split(os.sep)) if dirpath: dirpath += '/' for dirname in dirnames: if not files_only: result.append(dirpath+dirname+'/') for name in sorted(filenames): result.append(dirpath+name) else: for n in os.listdir(path): result.append(n) filtered = [] for path in sorted(result): ignore = False for regex in self.ignore: if regex.search(path): ignore = True break if ignore: continue filtered.append(path) return filtered def listdir(self, path=None, recursive=False): """ Print the contents of the specified directory. :param path: The path to list, which can be: * `None`, indicating the root of the temporary directory should be listed. * A tuple of strings, indicating that the elements of the tuple should be used as directory names to traverse from the root of the temporary directory to find the directory to be listed. * A forward-slash separated string, indicating the directory or subdirectory that should be traversed to from the temporary directory and listed. :param recursive: If `True`, the directory specified will have its subdirectories recursively listed too. """ actual = self.actual(path, recursive) if not actual: print('No files or directories found.') for n in actual: print(n) def compare(self, expected, path=None, files_only=False, recursive=True, followlinks=False): """ Compare the expected contents with the actual contents of the temporary directory. An :class:`AssertionError` will be raised if they are not the same. :param expected: A sequence of strings containing the paths expected in the directory. These paths should be forward-slash separated and relative to the root of the temporary directory. 
:param path: The path to use as the root for the comparison, relative to the root of the temporary directory. This can either be: * A tuple of strings, making up the relative path. * A forward-slash separated string. If it is not provided, the root of the temporary directory will be used. :param files_only: If specified, directories will be excluded from the list of actual paths used in the comparison. :param recursive: If passed as ``False``, only the direct contents of the directory specified by ``path`` will be included in the actual contents used for comparison. :param followlinks: If passed as ``True``, symlinks and hard links will be followed when recursively building up the actual list of directory contents. """ __tracebackhide__ = True compare(expected=sorted(expected), actual=tuple(self.actual( path, recursive, files_only, followlinks )), recursive=False) def check(self, *expected): """ .. deprecated:: 4.3.0 Compare the contents of the temporary directory with the expected contents supplied. This method only checks the root of the temporary directory. :param expected: A sequence of strings containing the names expected in the directory. """ compare(expected, tuple(self.actual()), recursive=False) def check_dir(self, dir, *expected): """ .. deprecated:: 4.3.0 Compare the contents of the specified subdirectory of the temporary directory with the expected contents supplied. This method will only check the contents of the subdirectory specified and will not recursively check subdirectories. :param dir: The subdirectory to check, which can be: * A tuple of strings, indicating that the elements of the tuple should be used as directory names to traverse from the root of the temporary directory to find the directory to be checked. * A forward-slash separated string, indicating the directory or subdirectory that should be traversed to from the temporary directory and checked. :param expected: A sequence of strings containing the names expected in the directory. 
""" compare(expected, tuple(self.actual(dir)), recursive=False) def check_all(self, dir, *expected): """ .. deprecated:: 4.3.0 Recursively compare the contents of the specified directory with the expected contents supplied. :param dir: The directory to check, which can be: * A tuple of strings, indicating that the elements of the tuple should be used as directory names to traverse from the root of the temporary directory to find the directory to be checked. * A forward-slash separated string, indicating the directory or subdirectory that should be traversed to from the temporary directory and checked. * An empty string, indicating that the whole temporary directory should be checked. :param expected: A sequence of strings containing the paths expected in the directory. These paths should be forward-slash separated and relative to the root of the temporary directory. """ compare(expected, tuple(self.actual(dir, recursive=True)), recursive=False) def _join(self, name): # make things platform independent if isinstance(name, basestring): name = name.split('/') relative = os.sep.join(name).rstrip(os.sep) if relative.startswith(os.sep): if relative.startswith(self.path): return relative raise ValueError( 'Attempt to read or write outside the temporary Directory' ) return os.path.join(self.path, relative) def makedir(self, dirpath): """ Make an empty directory at the specified path within the temporary directory. Any intermediate subdirectories that do not exist will also be created. :param dirpath: The directory to create, which can be: * A tuple of strings. * A forward-slash separated string. :returns: The full path of the created directory. """ thepath = self._join(dirpath) os.makedirs(thepath) return thepath def write(self, filepath, data, encoding=None): """ Write the supplied data to a file at the specified path within the temporary directory. Any subdirectories specified that do not exist will also be created. The file will always be written in binary mode. 
The data supplied must either be bytes or an encoding must be supplied to convert the string into bytes. :param filepath: The path to the file to create, which can be: * A tuple of strings. * A forward-slash separated string. :param data: A string containing the data to be written. :param encoding: The encoding to be used if data is not bytes. Should not be passed if data is already bytes. :returns: The full path of the file written. """ if isinstance(filepath, basestring): filepath = filepath.split('/') if len(filepath) > 1: dirpath = self._join(filepath[:-1]) if not os.path.exists(dirpath): os.makedirs(dirpath) thepath = self._join(filepath) encoding = encoding or self.encoding if encoding is not None: data = data.encode(encoding) with open(thepath, 'wb') as f: f.write(data) return thepath def getpath(self, path): """ Return the full path on disk that corresponds to the path relative to the temporary directory that is passed in. :param path: The path to the file to create, which can be: * A tuple of strings. * A forward-slash separated string. :returns: A string containing the full path. """ return self._join(path) def read(self, filepath, encoding=None): """ Reads the file at the specified path within the temporary directory. The file is always read in binary mode. Bytes will be returned unless an encoding is supplied, in which case a unicode string of the decoded data will be returned. :param filepath: The path to the file to read, which can be: * A tuple of strings. * A forward-slash separated string. :param encoding: The encoding used to decode the data in the file. :returns: A string containing the data read. 
""" with open(self._join(filepath), 'rb') as f: data = f.read() encoding = encoding or self.encoding if encoding is not None: return data.decode(encoding) return data def __enter__(self): return self def __exit__(self, type, value, traceback): self.cleanup() def tempdir(*args, **kw): """ A decorator for making a :class:`TempDirectory` available for the duration of a test function. All arguments and parameters are passed through to the :class:`TempDirectory` constructor. """ kw['create'] = False l = TempDirectory(*args, **kw) return wrap(l.create, l.cleanup) testfixtures-6.18.3/testfixtures/tests/000077500000000000000000000000001412502526400202765ustar00rootroot00000000000000testfixtures-6.18.3/testfixtures/tests/__init__.py000066400000000000000000000001001412502526400223760ustar00rootroot00000000000000import warnings warnings.simplefilter('default', ImportWarning) testfixtures-6.18.3/testfixtures/tests/configparser-read.txt000066400000000000000000000016531412502526400244370ustar00rootroot00000000000000Here's an example configuration file: .. topic:: example.cfg :class: write-file :: [A Section] dir=frob long: this value continues on the next line .. invisible-code-block: python from testfixtures.compat import PY3 # change to the temp directory import os original_dir = os.getcwd() os.chdir(tempdir.path) To parse this file using the :mod:`ConfigParser` module, you would do the following: .. code-block:: python if PY3: from configparser import ConfigParser else: from ConfigParser import ConfigParser config = ConfigParser() config.read('example.cfg') The items in the section are now available as follows: >>> for name, value in sorted(config.items('A Section')): ... print('{0!r}:{1!r}'.format(name, value)) 'dir':'frob' 'long':'this value continues\non the next line' .. 
invisible-code-block: python # change out again import os os.chdir(original_dir) testfixtures-6.18.3/testfixtures/tests/configparser-write.txt000066400000000000000000000015621412502526400246550ustar00rootroot00000000000000.. invisible-code-block: python from testfixtures.compat import PY3 # change to the temp directory import os original_dir = os.getcwd() os.chdir(tempdir.path) To construct a configuration file using the :mod:`ConfigParser` module, you would do the following: .. code-block:: python if PY3: from configparser import ConfigParser else: from ConfigParser import ConfigParser config = ConfigParser() config.add_section('A Section') config.set('A Section', 'dir', 'frob') f = open('example.cfg','w') config.write(f) f.close() The generated configuration file will be as follows: .. topic:: example.cfg :class: read-file :: [A Section] dir = frob .. config parser writes whitespace at the end, be careful when testing! .. invisible-code-block: python # change out again import os os.chdir(original_dir) testfixtures-6.18.3/testfixtures/tests/conftest.py000066400000000000000000000011741412502526400225000ustar00rootroot00000000000000from sybil import Sybil from sybil.parsers.doctest import DocTestParser from sybil.parsers.codeblock import CodeBlockParser from sybil.parsers.capture import parse_captures from testfixtures import TempDirectory from testfixtures.sybil import FileParser def sybil_setup(namespace): namespace['tempdir'] = TempDirectory() def sybil_teardown(namespace): namespace['tempdir'].cleanup() pytest_collect_file = Sybil( parsers=[ DocTestParser(), CodeBlockParser(), parse_captures, FileParser('tempdir'), ], pattern='*.txt', setup=sybil_setup, teardown=sybil_teardown, ).pytest() testfixtures-6.18.3/testfixtures/tests/directory-contents.txt000066400000000000000000000013431412502526400246770ustar00rootroot00000000000000Here's an example piece of code that creates some files and directories: .. 
code-block:: python import os def spew(path): with open(os.path.join(path, 'root.txt'), 'wb') as f: f.write(b'root output') os.mkdir(os.path.join(path, 'subdir')) with open(os.path.join(path, 'subdir', 'file.txt'), 'wb') as f: f.write(b'subdir output') os.mkdir(os.path.join(path, 'subdir', 'logs')) This function is used as follows: >>> spew(tempdir.path) This will create the following files and directories:: root.txt subdir/ subdir/file.txt subdir/logs/ .. -> expected_listing .. invisible-code-block: python # check the listing was as expected tempdir.compare(expected_listing.strip().split('\n')) testfixtures-6.18.3/testfixtures/tests/sample1.py000066400000000000000000000021641412502526400222150ustar00rootroot00000000000000# NB: This file is used in the documentation, if you make changes, ensure # you update the line numbers in popen.txt! """ A sample module containing the kind of code that testfixtures helps with testing """ from datetime import datetime, date def str_now_1(): return str(datetime.now()) now = datetime.now def str_now_2(): return str(now()) def str_today_1(): return str(date.today()) today = date.today def str_today_2(): return str(today()) from time import time def str_time(): return str(time()) class X: def y(self): return "original y" @classmethod def aMethod(cls): return cls @staticmethod def bMethod(): return 2 def z(): return "original z" class SampleClassA: def __init__(self, *args): self.args = args class SampleClassB(SampleClassA): pass def a_function(): return (SampleClassA(1), SampleClassB(2), SampleClassA(3)) someDict = dict( key='value', complex_key=[1, 2, 3], ) class Slotted(object): __slots__ = ['x', 'y'] def __init__(self, x, y): self.x = x self.y = y testfixtures-6.18.3/testfixtures/tests/sample2.py000066400000000000000000000006341412502526400222160ustar00rootroot00000000000000# NB: This file is used in the documentation, if you make changes, ensure # you update the line numbers in popen.txt! 
""" A sample module containing the kind of code that testfixtures helps with testing """ from testfixtures.tests.sample1 import X, z try: from guppy import hpy guppy = True except ImportError: guppy = False def dump(path): if guppy: hpy().heap().stat.dump(path) testfixtures-6.18.3/testfixtures/tests/test_compare.py000066400000000000000000001637701412502526400233530ustar00rootroot00000000000000import re from datetime import date, datetime from decimal import Decimal from functools import partial from collections import namedtuple from testfixtures.shouldraise import ShouldAssert from testfixtures.tests.sample1 import SampleClassA, SampleClassB, Slotted from testfixtures.mock import Mock, call from re import compile from testfixtures import ( Comparison as C, Replacer, ShouldRaise, compare, generator, singleton, ) from testfixtures.compat import ( class_type_name, exception_module, PY3, xrange, BytesLiteral, UnicodeLiteral, PY2, PY_37_PLUS, ABC ) from testfixtures.comparison import compare_sequence, compare_object from unittest import TestCase hexaddr = compile('0x[0-9A-Fa-f]+') def hexsub(raw): return hexaddr.sub('...', raw) call_list_repr = repr(Mock().mock_calls.__class__) marker = object() _compare = compare class Lazy: def __init__(self, message): self.message = message def __str__(self): return self.message def check_raises(x=marker, y=marker, message=None, regex=None, compare=compare, **kw): args = [] for value in x, y: if value is not marker: args.append(value) for value in 'x', 'y': explicit = 'explicit_{}'.format(value) if explicit in kw: kw[value] = kw[explicit] del kw[explicit] try: compare(*args, **kw) except Exception as e: if not isinstance(e, AssertionError): # pragma: no cover raise actual = hexsub(e.args[0]) if message is not None: # handy for debugging, but can't be relied on for tests! 
_compare(actual, expected=message, show_whitespace=True) assert actual == message else: if not regex.match(actual): # pragma: no cover raise AssertionError( '%r did not match %r' % (actual, regex.pattern) ) else: raise AssertionError('No exception raised!') class CompareHelper(object): def check_raises(self, *args, **kw): check_raises(*args, **kw) class TestCompare(CompareHelper, TestCase): def test_object_same(self): o = object() compare(o, o) def test_object_diff(self): self.check_raises( object(), object(), ' != ' ) def test_different_types(self): self.check_raises('x', 1, "'x' != 1") def test_number_same(self): compare(1, 1) def test_number_different(self): self.check_raises(1, 2, '1 != 2') def test_decimal_different(self): self.check_raises(Decimal(1), Decimal(2), "Decimal('1') != Decimal('2')") def test_different_with_labels(self): self.check_raises(1, 2, '1 (expected) != 2 (actual)', x_label='expected', y_label='actual') def test_string_same(self): compare('x', 'x') def test_unicode_string_different(self): if PY2: expected = "u'a' != 'b'" else: expected = "'a' != b'b'" self.check_raises( UnicodeLiteral('a'), BytesLiteral('b'), expected ) def test_bytes_different(self): if PY2: expected = ( "\n" "'12345678901'\n" '!=\n' "'12345678902'" ) else: expected = ( "\n" "b'12345678901'\n" '!=\n' "b'12345678902'" ) self.check_raises( BytesLiteral('12345678901'), BytesLiteral('12345678902'), expected ) def test_bytes_same_strict(self): compare(actual=b'', expected=b'', strict=True) if PY3: def test_moar_bytes_different(self): self.check_raises( actual=b'{"byte_pound":"b\'\\\\xa3\'"}', expected=b'{"byte_pound":"b\\\'\\xa3\'"}', message = ( "\n" "b'{\"byte_pound\":\"b\\\\\\'\\\\xa3\\\'\"}' (expected)\n" '!=\n' "b'{\"byte_pound\":\"b\\\'\\\\\\\\xa3\\\'\"}' (actual)" ) ) def test_string_diff_short(self): self.check_raises( '\n'+('x'*9), '\n'+('y'*9), "'\\nxxxxxxxxx' != '\\nyyyyyyyyy'" ) def test_string_diff_long(self): self.check_raises( 'x'*11, 'y'*11, 
"\n'xxxxxxxxxxx'\n!=\n'yyyyyyyyyyy'" ) def test_string_diff_long_newlines(self): self.check_raises( 'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5, "\n--- first\n+++ second\n@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz" ) def test_string_diff_short_labels(self): self.check_raises( '\n'+('x'*9), '\n'+('y'*9), "'\\nxxxxxxxxx' (expected) != '\\nyyyyyyyyy' (actual)", x_label='expected', y_label='actual' ) def test_string_diff_long_labels(self): self.check_raises( 'x'*11, 'y'*11, "\n'xxxxxxxxxxx' (expected)\n!=\n'yyyyyyyyyyy' (actual)", x_label='expected', y_label='actual' ) def test_string_diff_long_newlines_labels(self): self.check_raises( 'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5, "\n--- expected\n+++ actual\n" "@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz", x_label='expected', y_label='actual' ) def test_exception_same_object(self): e = ValueError('some message') compare(e, e) def test_exception_same_c_wrapper(self): e1 = ValueError('some message') e2 = ValueError('some message') compare(C(e1), e2) def test_exception_different_object(self): e1 = ValueError('some message') e2 = ValueError('some message') compare(e1, e2) def test_exception_different_object_c_wrapper(self): e1 = ValueError('some message') e2 = ValueError('some message') compare(C(e1), e2) def test_exception_diff(self): e1 = ValueError('some message') e2 = ValueError('some other message') if PY_37_PLUS: self.check_raises( e1, e2, "ValueError('some message') != ValueError('some other message')" ) else: self.check_raises( e1, e2, "ValueError('some message',) != ValueError('some other message',)" ) def test_exception_diff_c_wrapper(self): e1 = ValueError('some message') e2 = ValueError('some other message') self.check_raises( C(e1), e2, ("\n" "\n" "attributes differ:\n" "'args': ('some message',) (Comparison) " "!= ('some other message',) (actual)\n" "" " != ValueError('some other message'{message})" ).format(module=exception_module, message='' if PY_37_PLUS else ',')) def test_sequence_long(self): self.check_raises( ['quite a long 
string 1', 'quite a long string 2', 'quite a long string 3', 'quite a long string 4', 'quite a long string 5', 'quite a long string 6', 'quite a long string 7', 'quite a long string 8'], ['quite a long string 1', 'quite a long string 2', 'quite a long string 3', 'quite a long string 4', 'quite a long string 9', 'quite a long string 10', 'quite a long string 11', 'quite a long string 12'], "sequence not as expected:\n\n" "same:\n" "['quite a long string 1',\n" " 'quite a long string 2',\n" " 'quite a long string 3',\n" " 'quite a long string 4']\n\n" "first:\n" "['quite a long string 5',\n" " 'quite a long string 6',\n" " 'quite a long string 7',\n" " 'quite a long string 8']\n\n" "second:\n" "['quite a long string 9',\n" " 'quite a long string 10',\n" " 'quite a long string 11',\n" " 'quite a long string 12']\n" "\n" "While comparing [4]: \n" "'quite a long string 5'\n" "!=\n" "'quite a long string 9'" ) def test_sequence_different_labels_supplied(self): self.check_raises( [1, 2, 3], [1, 2, 4], "sequence not as expected:\n\n" "same:\n" "[1, 2]\n\n" "expected:\n" "[3]\n\n" "actual:\n" "[4]", x_label='expected', y_label='actual', ) def test_list_same(self): compare([1, 2, 3], [1, 2, 3]) def test_list_different(self): self.check_raises( [1, 2, 3], [1, 2, 4], "sequence not as expected:\n\n" "same:\n" "[1, 2]\n\n" "first:\n" "[3]\n\n" "second:\n" "[4]" ) def test_list_different_float(self): self.check_raises( [1, 2, 3.0], [1, 2, 4.0], "sequence not as expected:\n\n" "same:\n" "[1, 2]\n\n" "first:\n" "[3.0]\n\n" "second:\n" "[4.0]" ) def test_list_different_decimal(self): self.check_raises( [1, 2, Decimal(3)], [1, 2, Decimal(4)], "sequence not as expected:\n\n" "same:\n" "[1, 2]\n\n" "first:\n" "[Decimal('3')]\n\n" "second:\n" "[Decimal('4')]" ) def test_list_totally_different(self): self.check_raises( [1], [2], "sequence not as expected:\n\n" "same:\n" "[]\n\n" "first:\n" "[1]\n\n" "second:\n" "[2]" ) def test_list_first_shorter(self): self.check_raises( [1, 2], [1, 2, 
3], "sequence not as expected:\n\n" "same:\n[1, 2]\n\n" "first:\n[]\n\n" "second:\n[3]" ) def test_list_second_shorter(self): self.check_raises( [1, 2, 3], [1, 2], "sequence not as expected:\n\n" "same:\n[1, 2]\n\n" "first:\n[3]\n\n" "second:\n[]" ) def test_dict_same(self): compare(dict(x=1), dict(x=1)) def test_dict_first_missing_keys(self): self.check_raises( dict(), dict(z=3), "dict not as expected:\n" "\n" "in second but not first:\n" "'z': 3" ) def test_dict_second_missing_keys(self): self.check_raises( dict(z=3), dict(), "dict not as expected:\n" "\n" "in first but not second:\n" "'z': 3" ) def test_dict_values_different(self): self.check_raises( dict(x=1), dict(x=2), "dict not as expected:\n" "\n" "values differ:\n" "'x': 1 != 2" ) def test_dict_identical_non_matching_ints(self): self.check_raises( dict(x=1, y=1), dict(x=2, y=2), "dict not as expected:\n" "\n" "values differ:\n" "'x': 1 != 2\n" "'y': 1 != 2" ) def test_dict_identical_non_matching_floats(self): self.check_raises( dict(x=1.0, y=1.0), dict(x=2.0, y=2.0), "dict not as expected:\n" "\n" "values differ:\n" "'x': 1.0 != 2.0\n" "'y': 1.0 != 2.0" ) def test_dict_labels_specified(self): self.check_raises( dict(x=1, y=2), dict(x=2, z=3), "dict not as expected:\n" "\n" "in expected but not actual:\n" "'y': 2\n" "\n" "in actual but not expected:\n" "'z': 3\n" "\n" "values differ:\n" "'x': 1 (expected) != 2 (actual)", x_label='expected', y_label='actual' ) def test_dict_tuple_keys_same_value(self): compare({(1, 2): None}, {(1, 2): None}) def test_dict_tuple_keys_different_value(self): self.check_raises( {(1, 2): 3}, {(1, 2): 42}, "dict not as expected:\n" "\n" "values differ:\n" "(1, 2): 3 != 42" ) def test_dict_full_diff(self): self.check_raises( dict(x=1, y=2, a=4), dict(x=1, z=3, a=5), "dict not as expected:\n" "\n" 'same:\n' "['x']\n" "\n" "in first but not second:\n" "'y': 2\n" '\n' "in second but not first:\n" "'z': 3\n" '\n' "values differ:\n" "'a': 4 != 5" ) def 
test_dict_consistent_ordering(self): self.check_raises( dict(xa=1, xb=2, ya=1, yb=2, aa=3, ab=4), dict(xa=1, xb=2, za=3, zb=4, aa=5, ab=5), "dict not as expected:\n" "\n" 'same:\n' "['xa', 'xb']\n" "\n" "in first but not second:\n" "'ya': 1\n" "'yb': 2\n" '\n' "in second but not first:\n" "'za': 3\n" "'zb': 4\n" '\n' "values differ:\n" "'aa': 3 != 5\n" "'ab': 4 != 5" ) def test_dict_consistent_ordering_types_same(self): if PY3: same = "[6, None]\n" else: same = "[None, 6]\n" self.check_raises( {None: 1, 6: 2, 1: 3}, {None: 1, 6: 2, 1: 4}, "dict not as expected:\n" "\n"+ 'same:\n'+ same+ "\n" "values differ:\n" "1: 3 != 4" ) def test_dict_consistent_ordering_types_x_not_y(self): self.check_raises( {None: 1, 3: 2}, {}, "dict not as expected:\n" "\n" "in first but not second:\n" "3: 2\n" "None: 1" ) def test_dict_consistent_ordering_types_y_not_x(self): self.check_raises( {}, {None: 1, 3: 2}, "dict not as expected:\n" "\n" "in second but not first:\n" "3: 2\n" "None: 1" ) def test_dict_consistent_ordering_types_value(self): self.check_raises( {None: 1, 6: 2}, {None: 3, 6: 4}, "dict not as expected:\n" "\n" "values differ:\n" "6: 2 != 4\n" "None: 1 != 3" ) def test_set_same(self): compare(set([1]), set([1])) def test_set_first_missing_keys(self): self.check_raises( set(), set([3]), "set not as expected:\n" "\n" "in second but not first:\n" "[3]\n" '\n' ) def test_set_second_missing_keys(self): self.check_raises( set([3]), set(), "set not as expected:\n" "\n" "in first but not second:\n" "[3]\n" '\n' ) def test_set_full_diff(self): self.check_raises( set([1, 2, 4]), set([1, 3, 5]), "set not as expected:\n" "\n" "in first but not second:\n" "[2, 4]\n" '\n' "in second but not first:\n" "[3, 5]\n" '\n' ) def test_set_type_ordering(self): self.check_raises( {None, 1}, {'', 2}, "set not as expected:\n" "\n" "in first but not second:\n" "[1, None]\n" '\n' "in second but not first:\n" "['', 2]\n" '\n' ) def test_set_labels(self): self.check_raises( set([1, 2, 4]), set([1, 3, 
5]), "set not as expected:\n" "\n" "in expected but not actual:\n" "[2, 4]\n" '\n' "in actual but not expected:\n" "[3, 5]\n" '\n', x_label='expected', y_label='actual', ) def test_tuple_same(self): compare((1, 2, 3), (1, 2, 3)) def test_tuple_different(self): self.check_raises( (1, 2, 3), (1, 2, 4), "sequence not as expected:\n\n" "same:\n(1, 2)\n\n" "first:\n(3,)\n\n" "second:\n(4,)" ) def test_tuple_totally_different(self): self.check_raises( (1, ), (2, ), "sequence not as expected:\n\n" "same:\n()\n\n" "first:\n(1,)\n\n" "second:\n(2,)" ) def test_tuple_first_shorter(self): self.check_raises( (1, 2), (1, 2, 3), "sequence not as expected:\n\n" "same:\n(1, 2)\n\n" "first:\n()\n\n" "second:\n(3,)" ) def test_tuple_second_shorter(self): self.check_raises( (1, 2, 3), (1, 2), "sequence not as expected:\n\n" "same:\n(1, 2)\n\n" "first:\n(3,)\n\n" "second:\n()" ) def test_generator_same(self): compare(generator(1, 2, 3), generator(1, 2, 3)) def test_generator_different(self): self.check_raises( generator(1, 2, 3), generator(1, 2, 4), "sequence not as expected:\n\n" "same:\n(1, 2)\n\n" "first:\n(3,)\n\n" "second:\n(4,)" ) def test_generator_totally_different(self): self.check_raises( generator(1, ), generator(2, ), "sequence not as expected:\n\n" "same:\n()\n\n" "first:\n(1,)\n\n" "second:\n(2,)" ) def test_generator_first_shorter(self): self.check_raises( generator(1, 2), generator(1, 2, 3), "sequence not as expected:\n\n" "same:\n(1, 2)\n\n" "first:\n()\n\n" "second:\n(3,)" ) def test_generator_second_shorted(self): self.check_raises( generator(1, 2, 3), generator(1, 2), "sequence not as expected:\n\n" "same:\n(1, 2)\n\n" "first:\n(3,)\n\n" "second:\n()" ) def test_nested_generator_different(self): self.check_raises( generator(1, 2, generator(3), 4), generator(1, 2, generator(3), 5), "sequence not as expected:\n" "\n" "same:\n" "(1, 2, )\n" "\n" "first:\n" "(4,)\n" "\n" "second:\n" "(5,)" ) def test_nested_generator_tuple_left(self): compare( generator(1, 2, (3, ), 
4), generator(1, 2, generator(3), 4), ) def test_nested_generator_tuple_right(self): compare( generator(1, 2, generator(3), 4), generator(1, 2, (3, ), 4), ) def test_sequence_and_generator(self): compare((1, 2, 3), generator(1, 2, 3)) def test_sequence_and_generator_strict(self): expected = compile( "\(1, 2, 3\) \(<(class|type) 'tuple'>\) \(expected\) != " "\) \(actual\)" ) self.check_raises( (1, 2, 3), generator(1, 2, 3), regex=expected, strict=True, x_label='expected', y_label='actual', ) def test_generator_and_sequence(self): compare(generator(1, 2, 3), (1, 2, 3)) def test_iterable_with_iterable_same(self): compare(xrange(1, 4), xrange(1, 4)) def test_iterable_with_iterable_different(self): self.check_raises( xrange(1, 4), xrange(1, 3), "sequence not as expected:\n" "\n" "same:\n" "(1, 2)\n" "\n" "first:\n" "(3,)\n" "\n" "second:\n" "()" ) def test_iterable_and_generator(self): compare(xrange(1, 4), generator(1, 2, 3)) def test_iterable_and_generator_strict(self): expected = compile( "x?range\(1, 4\) \(<(class|type) 'x?range'>\) != " "\)" ) self.check_raises( xrange(1, 4), generator(1, 2, 3), regex=expected, strict=True, ) def test_generator_and_iterable(self): compare(generator(1, 2, 3), xrange(1, 4)) def test_tuple_and_list(self): compare((1, 2, 3), [1, 2, 3]) def test_tuple_and_list_strict(self): if PY2: expected = ("(1, 2, 3) () != " "[1, 2, 3] ()") else: expected = ("(1, 2, 3) () != " "[1, 2, 3] ()") self.check_raises( (1, 2, 3), [1, 2, 3], expected, strict=True ) def test_float_subclass_strict(self): class TestFloat(float): pass compare(TestFloat(0.75), TestFloat(0.75), strict=True) def test_old_style_classes_same(self): class X: pass compare(X, X) def test_old_style_classes_different(self): if PY3: expected = ( ".X'>" " != " ".Y'>" ) else: expected = ( "" " != " "" ) class X: pass class Y: pass self.check_raises(X, Y, expected) def test_new_style_classes_same(self): class X(object): pass compare(X, X) def test_new_style_classes_different(self): if PY3: 
expected = ( ".X'>" " != " ".Y'>" ) else: expected = ( "" " != " "" ) class X(object): pass class Y(object): pass self.check_raises(X, Y, expected) def test_show_whitespace(self): # does nothing! ;-) self.check_raises( ' x \n\r', ' x \n \t', "' x \\n\\r' != ' x \\n \\t'", show_whitespace=True ) def test_show_whitespace_long(self): self.check_raises( "\t \n '", '\r \n ', '\n--- first\n' '+++ second\n' '@@ -1,2 +1,2 @@\n' '-\'\\t \\n\'\n' '-" \'"\n' '+\'\\r \\n\'\n' '+\' \'', show_whitespace=True ) def test_show_whitespace_equal(self): compare('x', 'x', show_whitespace=True) def test_show_whitespace_not_used_because_of_other_difference(self): self.check_raises( (1, 'a'), (2, 'b'), "sequence not as expected:\n" "\n" "same:\n" "()\n" "\n" "first:\n" "(1, 'a')\n" "\n" "second:\n" "(2, 'b')", show_whitespace=False ) def test_include_trailing_whitespace(self): self.check_raises( ' x \n', ' x \n', "' x \\n' != ' x \\n'" ) def test_ignore_trailing_whitespace(self): compare(' x \t\n', ' x\t \n', trailing_whitespace=False) def test_ignore_trailing_whitespace_non_string(self): self.check_raises( 1, '', "1 != ''", trailing_whitespace=False ) def test_ignore_trailing_whitespace_but_respect_leading_whitespace(self): # NB: careful: this strips off the last newline too # DON'T use if you care about that! 
self.check_raises( 'a\n b\n c\n', 'a\nb\nc\n', "'a\\n b\\n c' != 'a\\nb\\nc'", trailing_whitespace=False ) def test_include_blank_lines(self): self.check_raises( '\n \n', '\n ', "'\\n \\n' != '\\n '" ) def test_ignore_blank_lines(self): compare(""" a \t b """, ' a\nb', blanklines=False) def test_ignore_blank_lines_non_string(self): self.check_raises( 1, '', "1 != ''", blanklines=False ) def test_supply_comparer(self): def compare_dict(x, y, context): self.assertEqual(x, {1: 1}) self.assertEqual(y, {2: 2}) self.assertEqual(context.get_option('foo'), 'bar') return 'not equal' with ShouldAssert('not equal'): compare({1: 1}, {2: 2}, foo='bar', comparers={dict: compare_dict}) def test_register_more_specific(self): class_ = namedtuple('Test', 'x') with ShouldAssert('compare class_'): compare(class_(1), class_(2), comparers={ tuple: Mock(return_value='compare tuple'), class_: Mock(return_value='compare class_') }) def test_extra_comparers_leave_existing(self): class MyObject(object): def __init__(self, name): self.name = name def __repr__(self): return 'MyObject instance' def compare_my_object(x, y, context): return '%s != %s' % (x.name, y.name) with Replacer() as r: r.replace('testfixtures.comparison._registry', { list: compare_sequence, }) self.check_raises( [1, MyObject('foo')], [1, MyObject('bar')], "sequence not as expected:\n" "\n" "same:\n" "[1]\n" "\n" "first:\n" "[MyObject instance]\n" "\n" "second:\n" "[MyObject instance]\n" "\n" "While comparing [1]: foo != bar", comparers={MyObject: compare_my_object} ) def test_list_subclass(self): class MyList(list): pass a_list = MyList([1]) b_list = MyList([2]) self.check_raises( a_list, b_list, "sequence not as expected:\n\n" "same:\n[]\n\n" "first:\n[1]\n\n" "second:\n[2]" ) def test_strict_okay(self): m = object() compare(m, m, strict=True) def test_strict_comparer_supplied(self): compare_obj = Mock() compare_obj.return_value = 'not equal' self.check_raises( object(), object(), "not equal", strict=True, 
comparers={object: compare_obj}, ) def test_strict_default_comparer(self): class MyList(list): pass # default comparer used! self.check_raises( MyList((1, 2, 3)), MyList((1, 2, 4)), "sequence not as expected:\n" "\n" "same:\n" "[1, 2]\n" "\n" "first:\n" "[3]\n" "\n" "second:\n" "[4]", strict=True, ) def test_list_subclass_strict(self): m = Mock() m.aCall() self.check_raises( [call.aCall()], m.method_calls, ("[call.aCall()] (<{0} 'list'>) != [call.aCall()] " "({1})").format(class_type_name, call_list_repr), strict=True, ) def test_list_subclass_long_strict(self): m = Mock() m.call('X'*20) self.check_raises( [call.call('Y'*20)], m.method_calls, ("[call.call('YYYYYYYYYYYYYYYYYY... " "(<{0} 'list'>) != " "[call.call('XXXXXXXXXXXXXXXXXX... " "({1})").format(class_type_name, call_list_repr), strict=True, ) def test_prefix(self): self.check_raises(1, 2, 'wrong number of orders: 1 != 2', prefix='wrong number of orders') def test_prefix_multiline(self): self.check_raises( 'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5, "file content: \n--- first\n+++ second\n" "@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz", prefix='file content' ) def test_prefix_callable(self): with ShouldAssert('foo: 1 != 2'): compare(1, 2, prefix=lambda: 'foo') def test_prefix_stringable(self): with ShouldAssert('foo: 1 != 2'): compare(1, 2, prefix=Lazy('foo')) def test_prefix_lazy(self): compare(2, 2, prefix=Mock(side_effect=Exception('boom!'))) def test_suffix(self): self.check_raises( 1, 2, '1 != 2\n' 'additional context', suffix='additional context', ) def test_suffix_callable(self): with ShouldAssert('1 != 2\n3'): compare(1, 2, suffix=lambda: 3) def test_suffix_stringable(self): with ShouldAssert('1 != 2\nfoo'): compare(1, 2, suffix=Lazy('foo')) def test_suffix_lazy(self): compare(2, 2, suffix=Mock(side_effect=Exception('boom!'))) def test_labels_multiline(self): self.check_raises( 'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5, "\n--- expected\n+++ actual\n" "@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz", x_label='expected', 
y_label='actual' ) def test_generator_with_non_generator(self): self.check_raises( generator(1, 2, 3), None, ' != None', ) def test_generator_with_buggy_generator(self): def bad_gen(): yield 1 # raising a TypeError here is important :-/ raise TypeError('foo') with ShouldRaise(TypeError('foo')): compare(generator(1, 2, 3), bad_gen()) def test_nested_dict_tuple_values_different(self): self.check_raises( dict(x=(1, 2, 3)), dict(x=(1, 2, 4)), "dict not as expected:\n" "\n" "values differ:\n" "'x': (1, 2, 3) != (1, 2, 4)\n" '\n' "While comparing ['x']: sequence not as expected:\n" "\n" "same:\n" "(1, 2)\n" "\n" "first:\n" "(3,)\n" "\n" "second:\n" "(4,)" ) def test_nested_dict_different(self): self.check_raises( dict(x=dict(y=1)), dict(x=dict(y=2)), "dict not as expected:\n" "\n" "values differ:\n" "'x': {'y': 1} != {'y': 2}\n" '\n' "While comparing ['x']: dict not as expected:\n" "\n" "values differ:\n" "'y': 1 != 2" ) def test_nested_dict_empty_but_same(self): compare(dict(x=dict()), dict(x=dict()), ignore_eq=True) def test_nested_dict_empty_with_keys(self): compare(dict(x=dict(x=1)), dict(x=dict(x=1)), ignore_eq=True) def test_tuple_list_different(self): self.check_raises( (1, [2, 3, 5]), (1, [2, 4, 5]), "sequence not as expected:\n" "\n" "same:\n" "(1,)\n" "\n" "first:\n" "([2, 3, 5],)\n" "\n" "second:\n" "([2, 4, 5],)\n" "\n" "While comparing [1]: sequence not as expected:\n" "\n" "same:\n" "[2]\n" "\n" "first:\n" "[3, 5]\n" "\n" "second:\n" "[4, 5]" ) def test_tuple_long_strings_different(self): self.check_raises( (1, 2, "foo\nbar\nbaz\n", 4), (1, 2, "foo\nbob\nbaz\n", 4), "sequence not as expected:\n" "\n" "same:\n" "(1, 2)\n" "\n" "first:\n" "('foo\\nbar\\nbaz\\n', 4)\n" "\n" "second:\n" "('foo\\nbob\\nbaz\\n', 4)\n" "\n" "While comparing [2]: \n" "--- first\n" "+++ second\n" "@@ -1,4 +1,4 @@\n" # check that show_whitespace bubbles down " 'foo\\n'\n" "-'bar\\n'\n" "+'bob\\n'\n" " 'baz\\n'\n" " ''", show_whitespace=True ) def test_dict_multiple_differences(self): 
self.check_raises( dict(x=(1, 2, 3), y=(4, 5, 6, )), dict(x=(1, 2, 4), y=(4, 5, 7, )), "dict not as expected:\n" "\n" "values differ:\n" "'x': (1, 2, 3) != (1, 2, 4)\n" "'y': (4, 5, 6) != (4, 5, 7)\n" "\n" "While comparing ['x']: sequence not as expected:\n" "\n" "same:\n" "(1, 2)\n" "\n" "first:\n" "(3,)\n" "\n" "second:\n" "(4,)\n" "\n" "While comparing ['y']: sequence not as expected:\n" "\n" "same:\n" "(4, 5)\n" "\n" "first:\n" "(6,)\n" "\n" "second:\n" "(7,)" ) def test_deep_breadcrumbs(self): obj1 = singleton('obj1') obj2 = singleton('obj2') gen1 = generator(obj1, obj2) gen2 = generator(obj1, ) # dict -> list -> tuple -> generator self.check_raises( dict(x=[1, ('a', 'b', gen1), 3], y=[3, 4]), dict(x=[1, ('a', 'b', gen2), 3], y=[3, 4]), ( "dict not as expected:\n" "\n" "same:\n" "['y']\n" "\n" "values differ:\n" "'x': [1, ('a', 'b', {gen1}), 3] != [1, ('a', 'b', {gen2}), 3]" "\n\n" "While comparing ['x']: sequence not as expected:\n" "\n" "same:\n" "[1]\n" "\n" "first:\n" "[('a', 'b', {gen1}), 3]\n" "\n" "second:\n" "[('a', 'b', {gen2}), 3]\n" "\n" "While comparing ['x'][1]: sequence not as expected:\n" "\n" "same:\n" "('a', 'b')\n" "\n" "first:\n" "({gen1},)\n" "\n" "second:\n" "({gen2},)\n" "\n" "While comparing ['x'][1][2]: sequence not as expected:\n" "\n" "same:\n" "(,)\n" "\n" "first:\n" "(,)\n" "\n" "second:\n" "()" ).format(gen1=hexsub(repr(gen1)), gen2=hexsub(repr(gen2))) ) def test_nested_labels(self): obj1 = singleton('obj1') obj2 = singleton('obj2') gen1 = generator(obj1, obj2) gen2 = generator(obj1, ) # dict -> list -> tuple -> generator self.check_raises( dict(x=[1, ('a', 'b', gen1), 3], y=[3, 4]), dict(x=[1, ('a', 'b', gen2), 3], y=[3, 4]), ( "dict not as expected:\n" "\n" "same:\n" "['y']\n" "\n" "values differ:\n" "'x': [1, ('a', 'b', {gen1}), 3] (expected) != " "[1, ('a', 'b', {gen2}), 3] (actual)\n" "\n" "While comparing ['x']: sequence not as expected:\n" "\n" "same:\n" "[1]\n" "\n" "expected:\n" "[('a', 'b', {gen1}), 3]\n" "\n" "actual:\n" 
"[('a', 'b', {gen2}), 3]\n" "\n" "While comparing ['x'][1]: sequence not as expected:\n" "\n" "same:\n" "('a', 'b')\n" "\n" "expected:\n" "({gen1},)\n" "\n" "actual:\n" "({gen2},)\n" "\n" "While comparing ['x'][1][2]: sequence not as expected:\n" "\n" "same:\n" "(,)\n" "\n" "expected:\n" "(,)\n" "\n" "actual:\n" "()" ).format(gen1=hexsub(repr(gen1)), gen2=hexsub(repr(gen2))), x_label='expected', y_label='actual', ) def test_nested_strict_only_type_difference(self): MyTuple = namedtuple('MyTuple', 'x y z') type_repr = repr(MyTuple) tuple_repr = repr(tuple) self.check_raises( [MyTuple(1, 2, 3)], [(1, 2, 3)], ("sequence not as expected:\n" "\n" "same:\n" "[]\n" "\n" "first:\n" "[MyTuple(x=1, y=2, z=3)]\n" "\n" "second:\n" "[(1, 2, 3)]\n" "\n" "While comparing [0]: MyTuple(x=1, y=2, z=3) " "(%s) " "!= (1, 2, 3) " "(%s)") % (type_repr, tuple_repr), strict=True ) def test_strict_nested_different(self): if PY2: expected = "[1, 2] () != (1, 3) ()" else: expected = "[1, 2] () != (1, 3) ()" self.check_raises( (1, 2, [1, 2]), (1, 2, (1, 3)), "sequence not as expected:\n" "\n" "same:\n" "(1, 2)\n" "\n" "first:\n" "([1, 2],)\n" "\n" "second:\n" "((1, 3),)" "\n\n" "While comparing [2]: " + expected, strict=True, ) def test_namedtuple_equal(self): class_ = namedtuple('Foo', 'x') compare(class_(1), class_(1)) def test_namedtuple_same_type(self): class_ = namedtuple('Foo', 'x y') self.check_raises( class_(1, 2), class_(1, 3), "Foo not as expected:\n\n" "same:\n" "['x']\n\n" "values differ:\n" "'y': 2 != 3" ) def test_namedtuple_different_type(self): class_a = namedtuple('Foo', 'x y') class_b = namedtuple('Bar', 'x y z') self.check_raises( class_a(1, 2), class_b(1, 2, 3), "Foo(x=1, y=2) () != " "Bar(x=1, y=2, z=3) " "()" ) def test_dict_with_list(self): self.check_raises( {1: 'one', 2: 'two'}, [1, 2], "{1: 'one', 2: 'two'} != [1, 2]" ) def test_explicit_expected(self): self.check_raises('x', expected='y', message="'y' (expected) != 'x' (actual)") def test_explicit_actual(self): 
self.check_raises('x', actual='y', message="'x' (expected) != 'y' (actual)") def test_explicit_both(self): self.check_raises(expected='x', actual='y', message="'x' (expected) != 'y' (actual)") def test_implicit_and_labels(self): self.check_raises('x', 'y', x_label='x_label', y_label='y_label', message="'x' (x_label) != 'y' (y_label)") def test_explicit_and_labels(self): self.check_raises(explicit_x='x', explicit_y='y', x_label='x_label', y_label='y_label', message="'x' (x_label) != 'y' (y_label)") def test_invalid_two_args_expected(self): with ShouldRaise(TypeError( "Exactly two objects needed, you supplied: ['z', 'x', 'y']" )): compare('x', 'y', expected='z') def test_invalid_two_args_actual(self): with ShouldRaise(TypeError( "Exactly two objects needed, you supplied: ['x', 'y', 'z']" )): compare('x', 'y', actual='z') def test_invalid_zero_args(self): with ShouldRaise(TypeError( 'Exactly two objects needed, you supplied:' )): compare() def test_invalid_one_args(self): with ShouldRaise(TypeError( "Exactly two objects needed, you supplied: ['x']" )): compare('x') def test_invalid_three_args(self): with ShouldRaise(TypeError( "Exactly two objects needed, you supplied: ['x', 'y', 'z']" )): compare('x', 'y', 'z') def test_invalid_because_of_typo(self): with ShouldRaise(TypeError( "Exactly two objects needed, you supplied: ['x'] {'expceted': 'z'}" )): compare('x', expceted='z') def test_dont_raise(self): self.assertEqual(compare('x', 'y', raises=False), "'x' != 'y'") class OrmObj(object): def __init__(self, a): self.a = a def __eq__(self, other): return True def __repr__(self): return 'OrmObj: '+str(self.a) def test_django_orm_is_horrible(self): self.assertTrue(self.OrmObj(1) == self.OrmObj(2)) def query_set(): yield self.OrmObj(1) yield self.OrmObj(2) self.check_raises( message=( "sequence not as expected:\n" "\n" "same:\n" "(OrmObj: 1,)\n" "\n" "expected:\n" "(OrmObj: 3,)\n" "\n" "actual:\n" "(OrmObj: 2,)\n" '\n' 'While comparing [1]: OrmObj not as expected:\n' '\n' 
'attributes differ:\n' "'a': 3 (expected) != 2 (actual)" ), expected=[self.OrmObj(1), self.OrmObj(3)], actual=query_set(), ignore_eq=True ) def test_django_orm_is_horrible_part_2(self): t_compare = partial(compare, ignore_eq=True) t_compare(self.OrmObj(1), self.OrmObj(1)) t_compare(self.OrmObj('some longish string'), self.OrmObj('some longish string')) t_compare(self.OrmObj(date(2016, 1, 1)), self.OrmObj(date(2016, 1, 1))) def test_django_orm_is_horrible_part_3(self): compare( expected=self.OrmObj(1), actual=self.OrmObj(1), ignore_eq=True ) def test_django_orm_is_horrible_part_4(self): self.check_raises( message='[1] (expected) != 2 (actual)', expected=[1], actual=2, ignore_eq=True ) def test_nested_django_orm_in_object(self): class MyObject(object): def __init__(self, orm): self.orm = orm self.check_raises( message="MyObject not as expected:\n" "\n" "attributes differ:\n" "'orm': OrmObj: 1 (expected) != OrmObj: 2 (actual)\n" "\n" "While comparing .orm: OrmObj not as expected:\n" "\n" "attributes differ:\n" "'a': 1 (expected) != 2 (actual)", expected=MyObject(self.OrmObj(1)), actual=MyObject(self.OrmObj(2)), ignore_eq=True) def test_mock_call_same(self): m = Mock() m.foo(1, 2, x=3) compare(m.mock_calls, m.mock_calls) def test_mock_call_same_strict(self): m = Mock() m.foo(1, 2, x=3) compare(m.mock_calls, m.mock_calls, strict=True) def test_calls_different(self): m1 = Mock() m2 = Mock() m1.foo(1, 2, x=3, y=4) m2.bar(1, 3, x=7, y=4) self.check_raises( m1.mock_calls, m2.mock_calls, "sequence not as expected:\n" "\n" "same:\n" "[]\n" "\n" "first:\n" "[call.foo(1, 2, x=3, y=4)]\n" "\n" "second:\n" "[call.bar(1, 3, x=7, y=4)]" "\n\n" 'While comparing [0]: \n' "'call.foo(1, 2, x=3, y=4)'\n" '!=\n' "'call.bar(1, 3, x=7, y=4)'" ) def test_call_args_different(self): m = Mock() m.foo(1) self.check_raises( m.foo.call_args, call(2), "'call(1)' != 'call(2)'" ) def test_calls_args_different_but_same_repr(self): class Annoying(object): def __init__(self, x): self.x = x def 
__repr__(self): return '' m1 = Mock() m2 = Mock() m1.foo(Annoying(1)) m2.foo(Annoying(3)) self.check_raises( m1.mock_calls, m2.mock_calls, 'sequence not as expected:\n' '\n' 'same:\n' '[]\n' '\n' 'first:\n' '[call.foo()]\n' '\n' 'second:\n' '[call.foo()]\n' '\n' 'While comparing [0]: mock.call not as expected:\n' '\n' 'While comparing [0] args: sequence not as expected:\n' '\n' 'same:\n' '()\n' '\n' 'first:\n' '(,)\n' '\n' 'second:\n' '(,)\n' '\n' 'While comparing [0] args[0]: Annoying not as expected:\n' '\n' 'attributes differ:\n' "'x': 1 != 3" ) def test_calls_nested_equal_sub_attributes(self): class Annoying(object): def __init__(self, x): self.x = x def __repr__(self): return '' m1 = Mock() m2 = Mock() m1.foo(x=[Annoying(1)]) m2.foo(x=[Annoying(1)]) compare(m1.mock_calls, m2.mock_calls) def test_compare_arbitrary_nested_diff(self): class OurClass: def __init__(self, *args): self.args = args def __repr__(self): return '' self.check_raises( OurClass(OurClass(1)), OurClass(OurClass(2)), "OurClass not as expected:\n" "\n" 'attributes differ:\n' "'args': (,) != (,)\n" '\n' 'While comparing .args: sequence not as expected:\n' '\n' 'same:\n' '()\n' '\n' 'first:\n' '(,)\n' '\n' 'second:\n' '(,)\n' '\n' 'While comparing .args[0]: OurClass not as expected:\n' '\n' 'attributes differ:\n' "'args': (1,) != (2,)\n" '\n' 'While comparing .args[0].args: sequence not as expected:\n' '\n' 'same:\n' '()\n' '\n' 'first:\n' '(1,)\n' '\n' 'second:\n' '(2,)' ) def test_compare_slotted_same(self): compare(Slotted(1, 2), Slotted(1, 2)) def test_compare_slotted_diff(self): self.check_raises( Slotted(1, 2), Slotted(1, 3), "Slotted not as expected:\n" "\n" "attributes same:\n" "['x']\n" "\n" 'attributes differ:\n' "'y': 2 != 3" ) def test_empty_sets(self): compare(set(), set()) def test_empty_sets_strict(self): compare(set(), set(), strict=True) def test_datetime_not_equal(self): self.check_raises( datetime(2001, 1, 1), datetime(2001, 1, 2), "datetime.datetime(2001, 1, 1, 0, 0) != " 
"datetime.datetime(2001, 1, 2, 0, 0)" ) def test_inherited_slots(self): class Parent(object): __slots__ = ('a',) class Child(Parent): __slots__ = ('b',) def __init__(self, a, b): self.a, self.b = a, b self.check_raises( Child(1, 'x'), Child(2, 'x'), 'Child not as expected:\n' '\n' 'attributes same:\n' "['b']\n" '\n' 'attributes differ:\n' "'a': 1 != 2" ) def test_empty_child_slots(self): class Parent(object): __slots__ = ('a',) def __init__(self, a): self.a = a class Child(Parent): __slots__ = () compare(Child(1), Child(1)) def test_slots_and_attrs(self): class Parent(object): __slots__ = ('a',) class Child(Parent): def __init__(self, a, b): self.a = a self.b = b self.check_raises(Child(1, 2), Child(1, 3), message=( 'Child not as expected:\n' '\n' 'attributes same:\n' "['a']\n" '\n' 'attributes differ:\n' "'b': 2 != 3" )) def test_partial_callable_different(self): def foo(x): pass def bar(y): pass self.check_raises( partial(foo), partial(bar), ( 'partial not as expected:\n' '\n' 'attributes same:\n' "['args', 'keywords']\n" '\n' 'attributes differ:\n' "'func': {foo} != {bar}\n" '\n' 'While comparing .func: {foo} != {bar}' ).format(foo=hexsub(repr(foo)), bar=hexsub(repr(bar)))) def test_partial_args_different(self): def foo(x): pass self.check_raises( partial(foo, 1), partial(foo, 2), 'partial not as expected:\n' '\n' 'attributes same:\n' "['func', 'keywords']\n" '\n' 'attributes differ:\n' "'args': (1,) != (2,)\n" '\n' 'While comparing .args: sequence not as expected:\n' '\n' 'same:\n' '()\n' '\n' 'first:\n' '(1,)\n' '\n' 'second:\n' '(2,)' ) def test_partial_kw_different(self): def foo(x): pass self.check_raises( partial(foo, x=1, y=3), partial(foo, x=2, z=4), 'partial not as expected:\n' '\n' 'attributes same:\n' "['args', 'func']\n" '\n' 'attributes differ:\n' "'keywords': {'x': 1, 'y': 3} != {'x': 2, 'z': 4}\n" '\n' 'While comparing .keywords: dict not as expected:\n' '\n' 'in first but not second:\n' "'y': 3\n" '\n' 'in second but not first:\n' "'z': 4\n" '\n' 
'values differ:\n' "'x': 1 != 2" ) def test_partial_equal(self): def foo(x): pass compare(partial(foo, 1, x=2), partial(foo, 1, x=2)) def test_repr_and_attributes_equal(self): class Wut(object): def __repr__(self): return 'Wut' def __eq__(self, other): return False self.check_raises( Wut(), Wut(), "Both x and y appear as 'Wut', but are not equal!" ) self.check_raises( expected=Wut(), actual=Wut(), message="Both expected and actual appear as 'Wut', but are not equal!" ) def test_string_with_slotted(self): class Slotted(object): __slots__ = ['foo'] def __init__(self, foo): self.foo = foo def __repr__(self): return repr(self.foo) self.check_raises( 'foo', Slotted('foo'), "'foo' (%s) != 'foo' (%s)" % (repr(str), repr(Slotted)) ) def test_not_recursive(self): self.check_raises( {1: 'foo', 2: 'foo'}, {1: 'bar', 2: 'bar'}, "dict not as expected:\n" "\n" "values differ:\n" "1: 'foo' != 'bar'\n" "2: 'foo' != 'bar'\n" "\n" "While comparing [1]: 'foo' != 'bar'" "\n\n" "While comparing [2]: 'foo' != 'bar'" ) def test_regex(self): if PY2: return shared_prefix = "a" * 199 self.check_raises( re.compile(shared_prefix + "x"), re.compile(shared_prefix + "y"), 'Both x and y appear as "re.compile(\''+'a'*199+')", but are not equal!' 
) class TestIgnore(CompareHelper): class Parent(object): def __init__(self, id, other): self.id = id self.other = other def __repr__(self): return '<{}:{}>'.format(type(self).__name__, self.id) class Child(Parent): pass def test_ignore_attributes(self): compare(self.Parent(1, 3), self.Parent(2, 3), ignore_attributes={'id'}) def test_ignore_attributes_different_types(self): self.check_raises( self.Parent(1, 3), self.Child(2, 3), ' != ', ignore_attributes={'id'} ) def test_ignore_attributes_per_type(self): ignore = {self.Parent: {'id'}} compare(self.Parent(1, 3), self.Parent(2, 3), ignore_attributes=ignore) self.check_raises( self.Child(1, 3), self.Child(2, 3), 'Child not as expected:\n' '\n' 'attributes same:\n' "['other']\n" '\n' 'attributes differ:\n' "'id': 1 != 2", ignore_attributes=ignore ) class TestCompareObject(object): class Thing(object): def __init__(self, **kw): for k, v in kw.items(): setattr(self, k, v) def test_ignore(self): def compare_thing(x, y, context): return compare_object(x, y, context, ignore_attributes=['y']) compare(self.Thing(x=1, y=2), self.Thing(x=1, y=3), comparers={self.Thing: compare_thing}) def test_ignore_dict_context_list_param(self): def compare_thing(x, y, context): return compare_object(x, y, context, ignore_attributes=['y']) compare(self.Thing(x=1, y=2, z=3), self.Thing(x=1, y=4, z=5), comparers={self.Thing: compare_thing}, ignore_attributes={self.Thing: ['z']}) def test_ignore_list_context_list_param(self): def compare_thing(x, y, context): return compare_object(x, y, context, ignore_attributes=['y']) compare(self.Thing(x=1, y=2, z=3), self.Thing(x=1, y=4, z=5), comparers={self.Thing: compare_thing}, ignore_attributes=['z']) class BaseClass(ABC): pass class MyDerivedClass(BaseClass): def __init__(self, thing): self.thing = thing class ConcreteBaseClass(object): pass class ConcreteDerivedClass(ConcreteBaseClass): def __init__(self, thing): self.thing = thing class TestBaseClasses(CompareHelper): def test_abc_equal(self): thing1 
= MyDerivedClass(1) thing2 = MyDerivedClass(1) compare(thing1, thing2) def test_abc_unequal(self): thing1 = MyDerivedClass(1) thing2 = MyDerivedClass(2) self.check_raises(thing1, thing2, message=( "MyDerivedClass not as expected:\n\n" "attributes differ:\n" "'thing': 1 != 2" )) def test_concrete_equal(self): thing1 = ConcreteDerivedClass(1) thing2 = ConcreteDerivedClass(1) compare(thing1, thing2) def test_concrete_unequal(self): thing1 = ConcreteDerivedClass(1) thing2 = ConcreteDerivedClass(2) self.check_raises(thing1, thing2, message=( "ConcreteDerivedClass not as expected:\n\n" "attributes differ:\n" "'thing': 1 != 2" )) testfixtures-6.18.3/testfixtures/tests/test_comparison.py000066400000000000000000000573051412502526400240730ustar00rootroot00000000000000from unittest import TestCase import sys from testfixtures import Comparison as C, TempDirectory, compare, diff, Comparison from testfixtures.compat import PY2, PY3, exception_module from testfixtures.shouldraise import ShouldAssert from testfixtures.tests.sample1 import SampleClassA, a_function import pytest class AClass: def __init__(self, x, y=None): self.x = x if y: self.y = y def __repr__(self): return '<'+self.__class__.__name__+'>' class BClass(AClass): pass class WeirdException(Exception): def __init__(self, x, y): self.x = x self.y = y class X(object): __slots__ = ['x'] class FussyDefineComparison(object): def __init__(self, attr): self.attr = attr def __eq__(self, other): if not isinstance(other, self.__class__): # pragma: no cover raise TypeError() return False # pragma: no cover def __ne__(self, other): return not self == other # pragma: no cover def compare_repr(obj, expected): actual = diff(expected, repr(obj)) if actual: # pragma: no cover raise AssertionError(actual) class TestC(TestCase): def test_example(self): # In this pattern, we want to check a sequence is # of the correct type and order. 
r = a_function() self.assertEqual(r, ( C('testfixtures.tests.sample1.SampleClassA'), C('testfixtures.tests.sample1.SampleClassB'), C('testfixtures.tests.sample1.SampleClassA'), )) # We also want to check specific parts of some # of the returned objects' attributes self.assertEqual(r[0].args[0], 1) self.assertEqual(r[1].args[0], 2) self.assertEqual(r[2].args[0], 3) def test_example_with_object(self): # Here we see compare an object with a Comparison # based on an object of the same type and with the # same attributes: self.assertEqual( C(AClass(1, 2)), AClass(1, 2), ) # ...even though the original class doesn't support # meaningful comparison: self.assertNotEqual( AClass(1, 2), AClass(1, 2), ) def test_example_with_vars(self): # Here we use a Comparison to make sure both the # type and attributes of an object are correct. self.assertEqual( C('testfixtures.tests.test_comparison.AClass', x=1, y=2), AClass(1, 2), ) def test_example_with_odd_vars(self): # If the variable names class with parameters to the # Comparison constructor, they can be specified in a # dict: self.assertEqual( C('testfixtures.tests.test_comparison.AClass', {'x': 1, 'y': 2}), AClass(1, 2), ) def test_example_partial(self): self.assertEqual( C('testfixtures.tests.test_comparison.AClass', x=1, partial=True), AClass(1, 2), ) def test_example_dont_use_c_wrappers_on_both_sides(self): # NB: don't use C wrappers on both sides! 
e = ValueError('some message') x, y = C(e), C(e) assert x != y compare_repr(x, "wrong type".format( mod=exception_module)) compare_repr( y, "args: ('some message',)".format( mod=exception_module) ) def test_repr_module(self): compare_repr(C('datetime'), '') def test_repr_class(self): compare_repr(C('testfixtures.tests.sample1.SampleClassA'), '') def test_repr_function(self): compare_repr(C('testfixtures.tests.sample1.z'), '') def test_repr_instance(self): compare_repr(C(SampleClassA('something')), "" "args: ('something',)" "" ) def test_repr_exception(self): compare_repr(C(ValueError('something')), ("args: ('something',)" ).format(exception_module)) def test_repr_exception_not_args(self): if sys.version_info >= (3, 2, 4): # in PY3, even args that aren't set still appear to be there args = "args: (1, 2)\n" else: args = "args: ()\n" compare_repr( C(WeirdException(1, 2)), "\n\n" + args + "x: 1\n" "y: 2\n" "" ) def test_repr_class_and_vars(self): compare_repr( C(SampleClassA, {'args': (1,)}), "args: (1,)" ) def test_repr_nested(self): compare_repr( C(SampleClassA, y=C(AClass), z=C(BClass(1, 2))), "\n" "\n" "y: \n" "z: \n" " \n" " x: 1\n" " y: 2\n" " \n" "" ) def test_repr_failed_wrong_class(self): c = C('testfixtures.tests.test_comparison.AClass', x=1, y=2) assert c != BClass(1, 2) compare_repr(c, "" "wrong type" ) def test_repr_failed_all_reasons_in_one(self): c = C('testfixtures.tests.test_comparison.AClass', y=5, z='missing') assert c != AClass(1, 2) compare_repr(c, "\n" "\n" "attributes in Comparison but not actual:\n" "'z': 'missing'\n\n" "attributes in actual but not Comparison:\n" "'x': 1\n\n" "attributes differ:\n" "'y': 5 (Comparison) != 2 (actual)\n" "", ) def test_repr_failed_not_in_other(self): c = C('testfixtures.tests.test_comparison.AClass', x=1, y=2, z=(3, )) assert c != AClass(1, 2) compare_repr(c , "\n" "\n" "attributes same:\n" "['x', 'y']\n\n" "attributes in Comparison but not actual:\n" "'z': (3,)\n" "", ) def test_repr_failed_not_in_self(self): c 
= C('testfixtures.tests.test_comparison.AClass', y=2) assert c != AClass(x=(1, ), y=2) compare_repr(c, "\n" "\n" "attributes same:\n" "['y']\n\n" "attributes in actual but not Comparison:\n" "'x': (1,)\n" "", ) def test_repr_failed_not_in_self_partial(self): c = C('testfixtures.tests.test_comparison.AClass', x=1, y=2, z=(3, ), partial=True) assert c != AClass(x=1, y=2) compare_repr(c, "\n" "\n" "attributes same:\n" "['x', 'y']\n\n" "attributes in Comparison but not actual:\n" "'z': (3,)\n" "", ) def test_repr_failed_one_attribute_not_equal(self): c = C('testfixtures.tests.test_comparison.AClass', x=1, y=(2, )) assert c != AClass(1, (3, )) compare_repr(c, "\n" "\n" "attributes same:\n" "['x']\n\n" "attributes differ:\n" "'y': (2,) (Comparison) != (3,) (actual)\n" "", ) def test_repr_failed_nested(self): left_side = [C(AClass, x=1, y=2), C(BClass, x=C(AClass, x=1, y=2), y=C(AClass))] right_side = [AClass(1, 3), AClass(1, 3)] # do the comparison left_side == right_side compare_repr( left_side, "[\n" "\n" "attributes same:\n" "['x']\n\n" "attributes differ:\n" "'y': 2 (Comparison) != 3 (actual)\n" ", \n" "\n" "x: \n" " \n" " x: 1\n" " y: 2\n" " \n" "y: \n" "]" ) compare_repr(right_side, "[, ]") def test_repr_failed_nested_failed(self): left_side = [C(AClass, x=1, y=2), C(BClass, x=C(AClass, x=1, partial=True), y=C(AClass, z=2))] right_side = [AClass(1, 2), BClass(AClass(1, 2), AClass(1, 2))] # do the comparison left_side == right_side compare_repr( left_side, "[\n" "\n" "x: 1\n" "y: 2\n" ", \n" "\n" "attributes same:\n" "['x']\n\n" "attributes differ:\n" "'y': \n" "\n" "attributes in Comparison but not actual:\n" "'z': 2\n\n" "attributes in actual but not Comparison:\n" "'x': 1\n" "'y': 2\n" " (Comparison) != (actual)\n" "]", ) compare_repr(right_side, '[, ]') def test_repr_failed_passed_failed(self): c = C('testfixtures.tests.test_comparison.AClass', x=1, y=2) assert c != AClass(1, 3) compare_repr(c, "\n" "\n" "attributes same:\n" "['x']\n\n" "attributes differ:\n" 
"'y': 2 (Comparison) != 3 (actual)\n" "", ) assert c == AClass(1, 2) assert c != AClass(3, 2) compare_repr(c, "\n" "\n" "attributes same:\n" "['y']\n\n" "attributes differ:\n" "'x': 1 (Comparison) != 3 (actual)\n" "", ) def test_first(self): self.assertEqual( C('testfixtures.tests.sample1.SampleClassA'), SampleClassA() ) def test_second(self): self.assertEqual( SampleClassA(), C('testfixtures.tests.sample1.SampleClassA'), ) def test_not_same_first(self): self.assertNotEqual( C('datetime'), SampleClassA() ) def test_not_same_second(self): self.assertNotEqual( SampleClassA(), C('datetime') ) def test_object_supplied(self): self.assertEqual( SampleClassA(1), C(SampleClassA(1)) ) def test_class_and_vars(self): self.assertEqual( SampleClassA(1), C(SampleClassA, {'args': (1,)}) ) def test_class_and_kw(self): self.assertEqual( SampleClassA(1), C(SampleClassA, args=(1,)) ) def test_class_and_vars_and_kw(self): self.assertEqual( AClass(1, 2), C(AClass, {'x': 1}, y=2) ) def test_object_and_vars(self): # vars passed are used instead of the object's self.assertEqual( SampleClassA(1), C(SampleClassA(), {'args': (1,)}) ) def test_object_and_kw(self): # kws passed are used instead of the object's self.assertEqual( SampleClassA(1), C(SampleClassA(), args=(1,)) ) def test_object_partial(self): # only attributes on comparison object # are used self.assertEqual( C(AClass(1), partial=True), AClass(1, 2), ) def run_property_equal_test(self, partial): class SomeClass(object): @property def prop(self): return 1 self.assertEqual( C(SomeClass, prop=1, partial=partial), SomeClass() ) def test_property_equal(self): self.run_property_equal_test(partial=False) def test_property_equal_partial(self): self.run_property_equal_test(partial=True) def run_property_not_equal_test(self, partial): class SomeClass(object): @property def prop(self): return 1 c = C(SomeClass, prop=2, partial=partial) self.assertNotEqual(c, SomeClass()) compare_repr( c, "\n" "\n" "attributes differ:\n" "'prop': 2 
(Comparison) != 1 (actual)\n" "") def test_property_not_equal(self): self.run_property_not_equal_test(partial=False) def test_property_not_equal_partial(self): self.run_property_not_equal_test(partial=True) def run_method_equal_test(self, partial): class SomeClass(object): def method(self): pass # pragma: no cover instance = SomeClass() self.assertEqual( C(SomeClass, method=instance.method, partial=partial), instance ) def test_method_equal(self): self.run_method_equal_test(partial=False) def test_method_equal_partial(self): self.run_method_equal_test(partial=True) def run_method_not_equal_test(self, partial): class SomeClass(object): pass instance = SomeClass() instance.method = min c = C(SomeClass, method=max, partial=partial) self.assertNotEqual(c, instance) compare_repr( c, "\n" "\n" "attributes differ:\n" "'method': (Comparison)" " != (actual)\n" "" ) def test_method_not_equal(self): self.run_method_not_equal_test(partial=False) def test_method_not_equal_partial(self): self.run_method_not_equal_test(partial=True) def test_exception(self): self.assertEqual( ValueError('foo'), C(ValueError('foo')) ) def test_exception_class_and_args(self): self.assertEqual( ValueError('foo'), C(ValueError, args=('foo', )) ) def test_exception_instance_and_args(self): self.assertEqual( ValueError('foo'), C(ValueError('bar'), args=('foo', )) ) def test_exception_not_same(self): self.assertNotEqual( ValueError('foo'), C(ValueError('bar')) ) def test_exception_no_args_different(self): self.assertNotEqual( WeirdException(1, 2), C(WeirdException(1, 3)) ) def test_exception_no_args_same(self): self.assertEqual( C(WeirdException(1, 2)), WeirdException(1, 2) ) def test_repr_file_different(self): with TempDirectory() as d: path = d.write('file', b'stuff') f = open(path) f.close() if PY3: c = C('io.TextIOWrapper', name=path, mode='r', closed=False, partial=True) assert f != c compare_repr(c, "\n" "\n" "attributes same:\n" "['mode', 'name']\n\n" "attributes differ:\n" "'closed': False 
(Comparison) != True (actual)\n" "", ) else: c = C(file, name=path, mode='r', closed=False, partial=True) assert f != c compare_repr(c, "\n" "\n" "attributes same:\n" "['mode', 'name']\n\n" "attributes differ:\n" "'closed': False (Comparison) != True (actual)\n" "", ) def test_file_same(self): with TempDirectory() as d: path = d.write('file', b'stuff') f = open(path) f.close() if PY3: self.assertEqual( f, C('io.TextIOWrapper', name=path, mode='r', closed=True, partial=True) ) else: self.assertEqual( f, C(file, name=path, mode='r', closed=True, partial=True) ) def test_no___dict___strict(self): c = C(X, x=1) assert c != X() compare_repr(c, "\n" "\n" "attributes in Comparison but not actual:\n" "'x': 1\n" "") def test_no___dict___partial_same(self): x = X() x.x = 1 self.assertEqual(C(X, x=1, partial=True), x) def test_no___dict___partial_missing_attr(self): c = C(X, x=1, partial=True) assert c != X() compare_repr(c, "\n" "\n" "attributes in Comparison but not actual:\n" "'x': 1\n" "", ) def test_no___dict___partial_different(self): x = X() x.x = 2 c = C(X, x=1, y=2, partial=True) assert c != x compare_repr(c, "\n" "\n" "attributes in Comparison but not actual:\n" "'y': 2\n\n" "attributes differ:\n" "'x': 1 (Comparison) != 2 (actual)\n" "", ) def test_compared_object_defines_eq(self): # If an object defines eq, such as Django instances, # things become tricky class Annoying: def __init__(self): self.eq_called = 0 def __eq__(self, other): self.eq_called += 1 if isinstance(other, Annoying): return True return False self.assertEqual(Annoying(), Annoying()) # Suddenly, order matters. 
# This order is wrong, as it uses the class's __eq__: self.assertFalse(Annoying() == C(Annoying)) if PY2: # although this, which is subtly different, does not: self.assertFalse(Annoying() != C(Annoying)) else: # but on PY3 __eq__ is used as a fallback: self.assertTrue(Annoying() != C(Annoying)) # This is the right ordering: self.assertTrue(C(Annoying) == Annoying()) self.assertFalse(C(Annoying) != Annoying()) # When the ordering is right, you still get the useful # comparison representation afterwards c = C(Annoying, eq_called=1) c == Annoying() compare_repr( c, '\n\n' 'attributes differ:\n' "'eq_called': 1 (Comparison) != 0 (actual)\n" '' ) def test_importerror(self): assert C(ImportError('x')) == ImportError('x') def test_class_defines_comparison_strictly(self): self.assertEqual( C('testfixtures.tests.test_comparison.FussyDefineComparison', attr=1), FussyDefineComparison(1) ) def test_cant_resolve(self): try: C('testfixtures.bonkers') except Exception as e: self.failUnless(isinstance(e, AttributeError)) self.assertEqual( e.args, ("'testfixtures.bonkers' could not be resolved", ) ) else: self.fail('No exception raised!') def test_no_name(self): class NoName(object): pass NoName.__name__ = '' NoName.__module__ = '' c = C(NoName) if PY3: expected = ".NoName'>>" else: expected = ">" self.assertEqual(repr(c), expected) def test_missing_expected_attribute_strict(self): class MyClass(object): def __init__(self, **attrs): self.__dict__.update(attrs) c = Comparison(MyClass, b=2, c=3, strict=True) assert c != MyClass(a=1, b=2) def test_missing_expected_attribute_not_strict(self): class MyClass(object): def __init__(self, **attrs): self.__dict__.update(attrs) c = Comparison(MyClass, b=2, c=3, strict=False) assert c != MyClass(a=1, b=2) def test_extra_expected_attribute_strict(self): class MyClass(object): def __init__(self, **attrs): self.__dict__.update(attrs) c = Comparison(MyClass, a=1, strict=True) assert c != MyClass(a=1, b=2) def 
test_extra_expected_attribute_not_strict(self): class MyClass(object): def __init__(self, **attrs): self.__dict__.update(attrs) c = Comparison(MyClass, a=1, strict=False) assert c == MyClass(a=1, b=2) testfixtures-6.18.3/testfixtures/tests/test_components.py000066400000000000000000000017161412502526400241010ustar00rootroot00000000000000from testfixtures.mock import Mock, call from testfixtures import Replacer, compare from testfixtures.components import TestComponents from unittest import TestCase from warnings import catch_warnings class ComponentsTests(TestCase): def test_atexit(self): m = Mock() with Replacer() as r: r.replace('atexit.register', m.register) c = TestComponents() expected = [call.register(c.atexit)] compare(expected, m.mock_calls) with catch_warnings(record=True) as w: c.atexit() self.assertTrue(len(w), 1) compare(str(w[0].message), ( # pragma: no branch "TestComponents instances not uninstalled by shutdown!" )) c.uninstall() compare(expected, m.mock_calls) # check re-running has no ill effects c.atexit() compare(expected, m.mock_calls) testfixtures-6.18.3/testfixtures/tests/test_date.py000066400000000000000000000223201412502526400226230ustar00rootroot00000000000000from datetime import date as d, timedelta from time import strptime from testfixtures import ShouldRaise, test_date, replace, compare from testfixtures.tests import sample1, sample2 from unittest import TestCase class TestDate(TestCase): # NB: Only the today method is currently stubbed out, # if you need other methods, tests and patches # greatfully received! 
@replace('datetime.date', test_date()) def test_today(self): from datetime import date compare(date.today(), d(2001, 1, 1)) compare(date.today(), d(2001, 1, 2)) compare(date.today(), d(2001, 1, 4)) @replace('datetime.date', test_date(2001, 2, 3)) def test_today_supplied(self): from datetime import date compare(date.today(), d(2001, 2, 3)) @replace('datetime.date', test_date(year=2001, month=2, day=3)) def test_today_all_kw(self): from datetime import date compare(date.today(), d(2001, 2, 3)) @replace('datetime.date', test_date(None)) def test_today_sequence(self, t): t.add(2002, 1, 1) t.add(2002, 1, 2) t.add(2002, 1, 3) from datetime import date compare(date.today(), d(2002, 1, 1)) compare(date.today(), d(2002, 1, 2)) compare(date.today(), d(2002, 1, 3)) @replace('datetime.date', test_date(None)) def test_today_requested_longer_than_supplied(self, t): t.add(2002, 1, 1) t.add(2002, 1, 2) from datetime import date compare(date.today(), d(2002, 1, 1)) compare(date.today(), d(2002, 1, 2)) compare(date.today(), d(2002, 1, 3)) compare(date.today(), d(2002, 1, 5)) @replace('datetime.date', test_date(None)) def test_add_date_supplied(self): from datetime import date date.add(d(2001, 1, 2)) date.add(date(2001, 1, 3)) compare(date.today(), d(2001, 1, 2)) compare(date.today(), d(2001, 1, 3)) def test_instantiate_with_date(self): from datetime import date t = test_date(date(2002, 1, 1)) compare(t.today(), d(2002, 1, 1)) @replace('datetime.date', test_date(strict=True)) def test_call(self, t): compare(t(2002, 1, 2), d(2002, 1, 2)) from datetime import date dt = date(2003, 2, 1) self.failIf(dt.__class__ is d) compare(dt, d(2003, 2, 1)) def test_gotcha_import(self): # standard `replace` caveat, make sure you # patch all revelent places where date # has been imported: @replace('datetime.date', test_date()) def test_something(): from datetime import date compare(date.today(), d(2001, 1, 1)) compare(sample1.str_today_1(), '2001-01-02') with ShouldRaise(AssertionError) as s: 
test_something() # This convoluted check is because we can't stub # out the date, since we're testing stubbing out # the date ;-) j, dt1, j, dt2, j = s.raised.args[0].split("'") # check we can parse the date dt1 = strptime(dt1, '%Y-%m-%d') # check the dt2 bit was as it should be compare(dt2, '2001-01-02') # What you need to do is replace the imported type: @replace('testfixtures.tests.sample1.date', test_date()) def test_something(): compare(sample1.str_today_1(), '2001-01-01') test_something() def test_gotcha_import_and_obtain(self): # Another gotcha is where people have locally obtained # a class attributes, where the normal patching doesn't # work: @replace('testfixtures.tests.sample1.date', test_date()) def test_something(): compare(sample1.str_today_2(), '2001-01-01') with ShouldRaise(AssertionError) as s: test_something() # This convoluted check is because we can't stub # out the date, since we're testing stubbing out # the date ;-) j, dt1, j, dt2, j = s.raised.args[0].split("'") # check we can parse the date dt1 = strptime(dt1, '%Y-%m-%d') # check the dt2 bit was as it should be compare(dt2, '2001-01-01') # What you need to do is replace the imported name: @replace('testfixtures.tests.sample1.today', test_date().today) def test_something(): compare(sample1.str_today_2(), '2001-01-01') test_something() # if you have an embedded `today` as above, *and* you need to supply # a list of required dates, then it's often simplest just to # do a manual try-finally with a replacer: def test_import_and_obtain_with_lists(self): t = test_date(None) t.add(2002, 1, 1) t.add(2002, 1, 2) from testfixtures import Replacer r = Replacer() r.replace('testfixtures.tests.sample1.today', t.today) try: compare(sample1.str_today_2(), '2002-01-01') compare(sample1.str_today_2(), '2002-01-02') finally: r.restore() @replace('datetime.date', test_date()) def test_repr(self): from datetime import date compare(repr(date), "") @replace('datetime.date', test_date(delta=2)) def 
test_delta(self): from datetime import date compare(date.today(), d(2001, 1, 1)) compare(date.today(), d(2001, 1, 3)) compare(date.today(), d(2001, 1, 5)) @replace('datetime.date', test_date(delta_type='weeks')) def test_delta_type(self): from datetime import date compare(date.today(), d(2001, 1, 1)) compare(date.today(), d(2001, 1, 8)) compare(date.today(), d(2001, 1, 22)) @replace('datetime.date', test_date(None)) def test_set(self): from datetime import date date.set(2001, 1, 2) compare(date.today(), d(2001, 1, 2)) date.set(2002, 1, 1) compare(date.today(), d(2002, 1, 1)) compare(date.today(), d(2002, 1, 3)) @replace('datetime.date', test_date(None)) def test_set_date_supplied(self): from datetime import date date.set(d(2001, 1, 2)) compare(date.today(), d(2001, 1, 2)) date.set(date(2001, 1, 3)) compare(date.today(), d(2001, 1, 3)) @replace('datetime.date', test_date(None)) def test_set_kw(self): from datetime import date date.set(year=2001, month=1, day=2) compare(date.today(), d(2001, 1, 2)) @replace('datetime.date', test_date(None)) def test_add_kw(self, t): t.add(year=2002, month=1, day=1) from datetime import date compare(date.today(), d(2002, 1, 1)) @replace('datetime.date', test_date(strict=True)) def test_isinstance_strict_true(self): from datetime import date to_check = [] to_check.append(date(1999, 1, 1)) to_check.append(date.today()) date.set(2001, 1, 2) to_check.append(date.today()) date.add(2001, 1, 3) to_check.append(date.today()) to_check.append(date.today()) date.set(date(2001, 1, 4)) to_check.append(date.today()) date.add(date(2001, 1, 5)) to_check.append(date.today()) to_check.append(date.today()) date.set(d(2001, 1, 4)) to_check.append(date.today()) date.add(d(2001, 1, 5)) to_check.append(date.today()) to_check.append(date.today()) for inst in to_check: self.failUnless(isinstance(inst, date), inst) self.failUnless(inst.__class__ is date, inst) self.failUnless(isinstance(inst, d), inst) self.failIf(inst.__class__ is d, inst) 
@replace('datetime.date', test_date()) def test_isinstance_default(self): from datetime import date to_check = [] to_check.append(date(1999, 1, 1)) to_check.append(date.today()) date.set(2001, 1, 2) to_check.append(date.today()) date.add(2001, 1, 3) to_check.append(date.today()) to_check.append(date.today()) date.set(date(2001, 1, 4)) to_check.append(date.today()) date.add(date(2001, 1, 5)) to_check.append(date.today()) to_check.append(date.today()) date.set(d(2001, 1, 4)) to_check.append(date.today()) date.add(d(2001, 1, 5)) to_check.append(date.today()) to_check.append(date.today()) for inst in to_check: self.failIf(isinstance(inst, date), inst) self.failIf(inst.__class__ is date, inst) self.failUnless(isinstance(inst, d), inst) self.failUnless(inst.__class__ is d, inst) def test_tick_when_static(self): date = test_date(delta=0) compare(date.today(), expected=d(2001, 1, 1)) date.tick(days=1) compare(date.today(), expected=d(2001, 1, 2)) def test_tick_when_dynamic(self): # hopefully not that common? 
date = test_date() compare(date.today(), expected=date(2001, 1, 1)) date.tick(days=1) compare(date.today(), expected=date(2001, 1, 3)) def test_tick_with_timedelta_instance(self): date = test_date(delta=0) compare(date.today(), expected=d(2001, 1, 1)) date.tick(timedelta(days=1)) compare(date.today(), expected=d(2001, 1, 2)) testfixtures-6.18.3/testfixtures/tests/test_datetime.py000066400000000000000000000360061412502526400235100ustar00rootroot00000000000000from datetime import date from datetime import datetime as d from datetime import timedelta from datetime import tzinfo from testfixtures import test_datetime, test_date from testfixtures import replace, Replacer, compare, ShouldRaise from testfixtures.tests import sample1 from unittest import TestCase class SampleTZInfo(tzinfo): __test__ = False def utcoffset(self, dt): return timedelta(minutes=3) + self.dst(dt) def dst(self, dt): return timedelta(minutes=1) class SampleTZInfo2(tzinfo): __test__ = False def utcoffset(self, dt): return timedelta(minutes=5) def dst(self, dt): return timedelta(minutes=0) class TestDateTime(TestCase): @replace('datetime.datetime', test_datetime()) def test_now(self): from datetime import datetime compare(datetime.now(), d(2001, 1, 1, 0, 0, 0)) compare(datetime.now(), d(2001, 1, 1, 0, 0, 10)) compare(datetime.now(), d(2001, 1, 1, 0, 0, 30)) @replace('datetime.datetime', test_datetime()) def test_now_with_tz_supplied(self): from datetime import datetime info = SampleTZInfo() compare(datetime.now(info), d(2001, 1, 1, 0, 4, tzinfo=SampleTZInfo())) @replace('datetime.datetime', test_datetime(tzinfo=SampleTZInfo())) def test_now_with_tz_setup(self): from datetime import datetime compare(datetime.now(), d(2001, 1, 1)) @replace('datetime.datetime', test_datetime(tzinfo=SampleTZInfo())) def test_now_with_tz_setup_and_supplied(self): from datetime import datetime info = SampleTZInfo2() compare(datetime.now(info), d(2001, 1, 1, 0, 1, tzinfo=info)) @replace('datetime.datetime', 
test_datetime(tzinfo=SampleTZInfo())) def test_now_with_tz_setup_and_same_supplied(self): from datetime import datetime info = SampleTZInfo() compare(datetime.now(info), d(2001, 1, 1, tzinfo=info)) def test_now_with_tz_instance(self): dt = test_datetime(d(2001, 1, 1, tzinfo=SampleTZInfo())) compare(dt.now(), d(2001, 1, 1)) def test_now_with_tz_instance_and_supplied(self): dt = test_datetime(d(2001, 1, 1, tzinfo=SampleTZInfo())) info = SampleTZInfo2() compare(dt.now(info), d(2001, 1, 1, 0, 1, tzinfo=info)) def test_now_with_tz_instance_and_same_supplied(self): dt = test_datetime(d(2001, 1, 1, tzinfo=SampleTZInfo())) info = SampleTZInfo() compare(dt.now(info), d(2001, 1, 1, tzinfo=info)) @replace('datetime.datetime', test_datetime(2002, 1, 1, 1, 2, 3)) def test_now_supplied(self): from datetime import datetime compare(datetime.now(), d(2002, 1, 1, 1, 2, 3)) @replace('datetime.datetime', test_datetime(None)) def test_now_sequence(self, t): t.add(2002, 1, 1, 1, 0, 0) t.add(2002, 1, 1, 2, 0, 0) t.add(2002, 1, 1, 3, 0, 0) from datetime import datetime compare(datetime.now(), d(2002, 1, 1, 1, 0, 0)) compare(datetime.now(), d(2002, 1, 1, 2, 0, 0)) compare(datetime.now(), d(2002, 1, 1, 3, 0, 0)) @replace('datetime.datetime', test_datetime()) def test_add_and_set(self, t): t.add(2002, 1, 1, 1, 0, 0) t.add(2002, 1, 1, 2, 0, 0) t.set(2002, 1, 1, 3, 0, 0) from datetime import datetime compare(datetime.now(), d(2002, 1, 1, 3, 0, 0)) compare(datetime.now(), d(2002, 1, 1, 3, 0, 10)) compare(datetime.now(), d(2002, 1, 1, 3, 0, 30)) @replace('datetime.datetime', test_datetime(None)) def test_add_datetime_supplied(self, t): from datetime import datetime t.add(d(2002, 1, 1, 1)) t.add(datetime(2002, 1, 1, 2)) compare(datetime.now(), d(2002, 1, 1, 1, 0, 0)) compare(datetime.now(), d(2002, 1, 1, 2, 0, 0)) tzinfo = SampleTZInfo() tzrepr = repr(tzinfo) with ShouldRaise(ValueError( 'Cannot add datetime with tzinfo of %s as configured to use None' %( tzrepr ))): t.add(d(2001, 1, 1, 
tzinfo=tzinfo)) def test_instantiate_with_datetime(self): from datetime import datetime t = test_datetime(datetime(2002, 1, 1, 1)) compare(t.now(), d(2002, 1, 1, 1, 0, 0)) @replace('datetime.datetime', test_datetime(None)) def test_now_requested_longer_than_supplied(self, t): t.add(2002, 1, 1, 1, 0, 0) t.add(2002, 1, 1, 2, 0, 0) from datetime import datetime compare(datetime.now(), d(2002, 1, 1, 1, 0, 0)) compare(datetime.now(), d(2002, 1, 1, 2, 0, 0)) compare(datetime.now(), d(2002, 1, 1, 2, 0, 10)) compare(datetime.now(), d(2002, 1, 1, 2, 0, 30)) @replace('datetime.datetime', test_datetime(strict=True)) def test_call(self, t): compare(t(2002, 1, 2, 3, 4, 5), d(2002, 1, 2, 3, 4, 5)) from datetime import datetime dt = datetime(2001, 1, 1, 1, 0, 0) self.failIf(dt.__class__ is d) compare(dt, d(2001, 1, 1, 1, 0, 0)) def test_date_return_type(self): with Replacer() as r: r.replace('datetime.datetime', test_datetime()) from datetime import datetime dt = datetime(2001, 1, 1, 1, 0, 0) d = dt.date() compare(d, date(2001, 1, 1)) self.failUnless(d.__class__ is date) def test_date_return_type_picky(self): # type checking is a bitch :-/ date_type = test_date(strict=True) with Replacer() as r: r.replace('datetime.datetime', test_datetime(date_type=date_type, strict=True, )) from datetime import datetime dt = datetime(2010, 8, 26, 14, 33, 13) d = dt.date() compare(d, date_type(2010, 8, 26)) self.failUnless(d.__class__ is date_type) # if you have an embedded `now` as above, *and* you need to supply # a list of required datetimes, then it's often simplest just to # do a manual try-finally with a replacer: def test_import_and_obtain_with_lists(self): t = test_datetime(None) t.add(2002, 1, 1, 1, 0, 0) t.add(2002, 1, 1, 2, 0, 0) from testfixtures import Replacer r = Replacer() r.replace('testfixtures.tests.sample1.now', t.now) try: compare(sample1.str_now_2(), '2002-01-01 01:00:00') compare(sample1.str_now_2(), '2002-01-01 02:00:00') finally: r.restore() @replace('datetime.datetime', 
test_datetime()) def test_repr(self): from datetime import datetime compare(repr(datetime), "") @replace('datetime.datetime', test_datetime(delta=1)) def test_delta(self): from datetime import datetime compare(datetime.now(), d(2001, 1, 1, 0, 0, 0)) compare(datetime.now(), d(2001, 1, 1, 0, 0, 1)) compare(datetime.now(), d(2001, 1, 1, 0, 0, 2)) @replace('datetime.datetime', test_datetime(delta_type='minutes')) def test_delta_type(self): from datetime import datetime compare(datetime.now(), d(2001, 1, 1, 0, 0, 0)) compare(datetime.now(), d(2001, 1, 1, 0, 10, 0)) compare(datetime.now(), d(2001, 1, 1, 0, 30, 0)) @replace('datetime.datetime', test_datetime(None)) def test_set(self): from datetime import datetime datetime.set(2001, 1, 1, 1, 0, 1) compare(datetime.now(), d(2001, 1, 1, 1, 0, 1)) datetime.set(2002, 1, 1, 1, 0, 0) compare(datetime.now(), d(2002, 1, 1, 1, 0, 0)) compare(datetime.now(), d(2002, 1, 1, 1, 0, 20)) @replace('datetime.datetime', test_datetime(None)) def test_set_datetime_supplied(self, t): from datetime import datetime t.set(d(2002, 1, 1, 1)) compare(datetime.now(), d(2002, 1, 1, 1, 0, 0)) t.set(datetime(2002, 1, 1, 2)) compare(datetime.now(), d(2002, 1, 1, 2, 0, 0)) tzinfo = SampleTZInfo() tzrepr = repr(tzinfo) with ShouldRaise(ValueError( 'Cannot add datetime with tzinfo of %s as configured to use None' %( tzrepr ))): t.set(d(2001, 1, 1, tzinfo=tzinfo)) @replace('datetime.datetime', test_datetime(None, tzinfo=SampleTZInfo())) def test_set_tz_setup(self): from datetime import datetime datetime.set(year=2002, month=1, day=1) compare(datetime.now(), d(2002, 1, 1)) @replace('datetime.datetime', test_datetime(None)) def test_set_kw(self): from datetime import datetime datetime.set(year=2002, month=1, day=1) compare(datetime.now(), d(2002, 1, 1)) @replace('datetime.datetime', test_datetime(None)) def test_set_tzinfo_kw(self): from datetime import datetime with ShouldRaise(TypeError('Cannot add using tzinfo on tdatetime')): datetime.set(year=2002, 
month=1, day=1, tzinfo=SampleTZInfo()) @replace('datetime.datetime', test_datetime(None)) def test_set_tzinfo_args(self): from datetime import datetime with ShouldRaise(TypeError('Cannot add using tzinfo on tdatetime')): datetime.set(2002, 1, 2, 3, 4, 5, 6, SampleTZInfo()) @replace('datetime.datetime', test_datetime(None)) def test_add_kw(self, t): from datetime import datetime t.add(year=2002, day=1, month=1) compare(datetime.now(), d(2002, 1, 1)) @replace('datetime.datetime', test_datetime(None)) def test_add_tzinfo_kw(self, t): from datetime import datetime with ShouldRaise(TypeError('Cannot add using tzinfo on tdatetime')): datetime.add(year=2002, month=1, day=1, tzinfo=SampleTZInfo()) @replace('datetime.datetime', test_datetime(None)) def test_add_tzinfo_args(self, t): from datetime import datetime with ShouldRaise(TypeError('Cannot add using tzinfo on tdatetime')): datetime.add(2002, 1, 2, 3, 4, 5, 6, SampleTZInfo()) @replace('datetime.datetime', test_datetime(2001, 1, 2, 3, 4, 5, 6, SampleTZInfo())) def test_max_number_args(self): from datetime import datetime compare(datetime.now(), d(2001, 1, 2, 3, 4, 5, 6)) @replace('datetime.datetime', test_datetime(2001, 1, 2)) def test_min_number_args(self): from datetime import datetime compare(datetime.now(), d(2001, 1, 2)) @replace('datetime.datetime', test_datetime( year=2001, month=1, day=2, hour=3, minute=4, second=5, microsecond=6, tzinfo=SampleTZInfo() )) def test_all_kw(self): from datetime import datetime compare(datetime.now(), d(2001, 1, 2, 3, 4, 5, 6)) @replace('datetime.datetime', test_datetime(2001, 1, 2)) def test_utc_now(self): from datetime import datetime compare(datetime.utcnow(), d(2001, 1, 2)) @replace('datetime.datetime', test_datetime(2001, 1, 2, tzinfo=SampleTZInfo())) def test_utc_now_with_tz(self): from datetime import datetime compare(datetime.utcnow(), d(2001, 1, 1, 23, 56)) @replace('datetime.datetime', test_datetime(strict=True)) def test_isinstance_strict(self): from datetime import 
datetime to_check = [] to_check.append(datetime(1999, 1, 1)) to_check.append(datetime.now()) to_check.append(datetime.now(SampleTZInfo())) to_check.append(datetime.utcnow()) datetime.set(2001, 1, 1, 20) to_check.append(datetime.now()) datetime.add(2001, 1, 1, 21) to_check.append(datetime.now()) to_check.append(datetime.now()) datetime.set(datetime(2001, 1, 1, 22)) to_check.append(datetime.now()) to_check.append(datetime.now(SampleTZInfo())) datetime.add(datetime(2001, 1, 1, 23)) to_check.append(datetime.now()) to_check.append(datetime.now()) to_check.append(datetime.now(SampleTZInfo())) datetime.set(d(2001, 1, 1, 22)) to_check.append(datetime.now()) datetime.add(d(2001, 1, 1, 23)) to_check.append(datetime.now()) to_check.append(datetime.now()) to_check.append(datetime.now(SampleTZInfo())) for inst in to_check: self.failUnless(isinstance(inst, datetime), inst) self.failUnless(inst.__class__ is datetime, inst) self.failUnless(isinstance(inst, d), inst) self.failIf(inst.__class__ is d, inst) @replace('datetime.datetime', test_datetime()) def test_isinstance_default(self): from datetime import datetime to_check = [] to_check.append(datetime(1999, 1, 1)) to_check.append(datetime.now()) to_check.append(datetime.now(SampleTZInfo())) to_check.append(datetime.utcnow()) datetime.set(2001, 1, 1, 20) to_check.append(datetime.now()) datetime.add(2001, 1, 1, 21) to_check.append(datetime.now()) to_check.append(datetime.now(SampleTZInfo())) datetime.set(datetime(2001, 1, 1, 22)) to_check.append(datetime.now()) datetime.add(datetime(2001, 1, 1, 23)) to_check.append(datetime.now()) to_check.append(datetime.now()) to_check.append(datetime.now(SampleTZInfo())) datetime.set(d(2001, 1, 1, 22)) to_check.append(datetime.now()) datetime.add(d(2001, 1, 1, 23)) to_check.append(datetime.now()) to_check.append(datetime.now()) to_check.append(datetime.now(SampleTZInfo())) for inst in to_check: self.failIf(isinstance(inst, datetime), inst) self.failIf(inst.__class__ is datetime, inst) 
self.failUnless(isinstance(inst, d), inst) self.failUnless(inst.__class__ is d, inst) def test_subsecond_deltas(self): datetime = test_datetime(delta=0.5) compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 0, 0)) compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 0, 500000)) compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 1, 0)) def test_ms_delta(self): datetime = test_datetime(delta=100, delta_type='microseconds') compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 0, 0)) compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 0, 100)) compare(datetime.now(), datetime(2001, 1, 1, 0, 0, 0, 200)) def test_tick_when_static(self): datetime = test_datetime(delta=0) compare(datetime.now(), expected=d(2001, 1, 1)) datetime.tick(hours=1) compare(datetime.now(), expected=d(2001, 1, 1, 1)) def test_tick_when_dynamic(self): # hopefully not that common? datetime = test_datetime() compare(datetime.now(), expected=d(2001, 1, 1)) datetime.tick(hours=1) compare(datetime.now(), expected=d(2001, 1, 1, 1, 0, 10)) def test_tick_with_timedelta_instance(self): datetime = test_datetime(delta=0) compare(datetime.now(), expected=d(2001, 1, 1)) datetime.tick(timedelta(hours=1)) compare(datetime.now(), expected=d(2001, 1, 1, 1)) testfixtures-6.18.3/testfixtures/tests/test_diff.py000066400000000000000000000023341412502526400226210ustar00rootroot00000000000000from unittest import TestCase from testfixtures import diff class TestDiff(TestCase): def test_example(self): actual = diff(''' line1 line2 line3 ''', ''' line1 line changed line3 ''') expected = '''\ --- first +++ second @@ -1,5 +1,5 @@ line1 - line2 + line changed line3 ''' self.assertEqual( [line.strip() for line in expected.split("\n")], [line.strip() for line in actual.split("\n")], '\n%r\n!=\n%r' % (expected, actual) ) def test_no_newlines(self): actual = diff('x', 'y') # no rhyme or reason as to which of these comes back :-/ try: expected = '@@ -1 +1 @@\n-x\n+y' self.assertEqual( expected, actual, '\n%r\n!=\n%r' % (expected, actual) 
) except AssertionError: # pragma: no cover expected = '--- first\n+++ second\n@@ -1 +1 @@\n-x\n+y' self.assertEqual( expected, actual, '\n%r\n!=\n%r' % (expected, actual) ) testfixtures-6.18.3/testfixtures/tests/test_django/000077500000000000000000000000001412502526400225775ustar00rootroot00000000000000testfixtures-6.18.3/testfixtures/tests/test_django/__init__.py000066400000000000000000000000001412502526400246760ustar00rootroot00000000000000testfixtures-6.18.3/testfixtures/tests/test_django/manage.py000066400000000000000000000004211412502526400243760ustar00rootroot00000000000000import os def main(): os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testfixtures.tests.test_django.settings") from django.core.management import execute_from_command_line execute_from_command_line() if __name__ == "__main__": # pragma: no cover main() testfixtures-6.18.3/testfixtures/tests/test_django/models.py000066400000000000000000000003721412502526400244360ustar00rootroot00000000000000from django.db import models class OtherModel(models.Model): pass class SampleModel(models.Model): value = models.IntegerField() not_editable = models.IntegerField(editable=False) created = models.DateTimeField(auto_now_add=True) testfixtures-6.18.3/testfixtures/tests/test_django/settings.py000066400000000000000000000004051412502526400250100ustar00rootroot00000000000000SECRET_KEY = 'fake-key' INSTALLED_APPS = [ 'django.contrib.auth', 'django.contrib.contenttypes', "testfixtures.tests.test_django", ] DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3'}} DEFAULT_AUTO_FIELD='django.db.models.AutoField' testfixtures-6.18.3/testfixtures/tests/test_django/test_compare.py000066400000000000000000000064661412502526400256520ustar00rootroot00000000000000from unittest import TestCase import pytest from django.contrib.auth.models import User from testfixtures import OutputCapture, Replacer from testfixtures.compat import PY3 from .models import SampleModel from testfixtures.tests.test_django.manage 
import main from ..test_compare import CompareHelper from ... import compare from ...django import compare as django_compare class CompareTests(CompareHelper, TestCase): def test_simple_same(self): django_compare(SampleModel(id=1), SampleModel(id=1)) def test_simple_diff(self): if PY3: expected = "'id': 1 != 2" else: expected = "u'id': 1 != 2" self.check_raises( SampleModel(id=1), SampleModel(id=2), compare=django_compare, message=( 'SampleModel not as expected:\n' '\n' 'same:\n' "['value']\n" '\n' 'values differ:\n'+ expected ) ) def test_simple_ignore_fields(self): django_compare(SampleModel(id=1), SampleModel(id=1), ignore_fields=['id']) def test_ignored_because_speshul(self): django_compare(SampleModel(not_editable=1), SampleModel(not_editable=2)) def test_ignored_because_no_longer_speshul(self): if PY3: same = "['created', 'id', 'value']\n" else: same = "['created', u'id', 'value']\n" self.check_raises( SampleModel(not_editable=1), SampleModel(not_editable=2), compare=django_compare, message=( 'SampleModel not as expected:\n' '\n' 'same:\n'+ same+ '\n' 'values differ:\n' "'not_editable': 1 != 2" ), non_editable_fields=True ) def test_normal_compare_id_same(self): # other diffs ignored compare(SampleModel(id=1, value=1), SampleModel(id=1, value=2)) def test_normal_compare_id_diff(self): if PY3: expected = ( "'id': 3 != 4\n" "'value': 1 != 2" ) else: expected = ( "'value': 1 != 2\n" "u'id': 3 != 4" ) self.check_raises( SampleModel(id=3, value=1), SampleModel(id=4, value=2), compare=django_compare, message=( 'SampleModel not as expected:\n' '\n' 'values differ:\n'+ expected ) ) def test_manage(self): with OutputCapture() as output: with Replacer() as r: r.replace('os.environ.DJANGO_SETTINGS_MODULE', '', strict=False) r.replace('sys.argv', ['x', 'check']) main() output.compare('System check identified no issues (0 silenced).') @pytest.mark.django_db def test_many_to_many_same(self): user = User.objects.create(username='foo') django_compare(user, expected=User( 
username='foo', first_name='', last_name='', is_superuser=False ), ignore_fields=['id', 'date_joined']) testfixtures-6.18.3/testfixtures/tests/test_django/test_shouldraise.py000066400000000000000000000015411412502526400265330ustar00rootroot00000000000000from django.core.exceptions import ValidationError from testfixtures import ShouldRaise from testfixtures.compat import PY2 from testfixtures.shouldraise import ShouldAssert class TestShouldRaiseWithValidatorErrors(object): def test_as_expected(self): with ShouldRaise(ValidationError("d'oh")): raise ValidationError("d'oh") def test_not_as_expected(self): if PY2: message = ( 'ValidationError([u"d\'oh"]) (expected) != ' 'ValidationError([u\'nuts\']) (raised)' ) else: message = ( 'ValidationError(["d\'oh"]) (expected) != ' 'ValidationError([\'nuts\']) (raised)' ) with ShouldAssert(message): with ShouldRaise(ValidationError("d'oh")): raise ValidationError("nuts") testfixtures-6.18.3/testfixtures/tests/test_generator.py000066400000000000000000000007231412502526400236770ustar00rootroot00000000000000from unittest import TestCase from types import GeneratorType from testfixtures import generator class TestG(TestCase): def test_example(self): g = generator(1, 2, 3) self.failUnless(isinstance(g, GeneratorType)) self.assertEqual(tuple(g), (1, 2, 3)) def test_from_sequence(self): s = (1, 2, 3) g = generator(*s) self.failUnless(isinstance(g, GeneratorType)) self.assertEqual(tuple(g), (1, 2, 3)) testfixtures-6.18.3/testfixtures/tests/test_log_capture.py000066400000000000000000000163731412502526400242250ustar00rootroot00000000000000from __future__ import absolute_import from logging import getLogger, ERROR from unittest import TestCase from testfixtures.shouldraise import ShouldAssert from testfixtures.mock import patch from testfixtures import ( log_capture, compare, Comparison as C, LogCapture ) root = getLogger() one = getLogger('one') two = getLogger('two') child = getLogger('one.child') class TestLog_Capture(TestCase): 
@log_capture('two', 'one.child') @log_capture('one') @log_capture() def test_logging(self, l1, l2, l3): # we can now log as normal root.info('1') one.info('2') two.info('3') child.info('4') # and later check what was logged l1.check( ('root', 'INFO', '1'), ('one', 'INFO', '2'), ('two', 'INFO', '3'), ('one.child', 'INFO', '4'), ) l2.check( ('one', 'INFO', '2'), ('one.child', 'INFO', '4') ) l3.check( ('two', 'INFO', '3'), ('one.child', 'INFO', '4') ) # each logger also exposes the real # log records should anything else be neeeded compare(l3.records, [ C('logging.LogRecord'), C('logging.LogRecord'), ]) @log_capture(ensure_checks_above=ERROR) def test_simple_strict(self, l): root.error('during') l.check(("root", "ERROR", "during")) def test_uninstall_properly(self): root = getLogger() child = getLogger('child') before_root = root.handlers[:] before_child = child.handlers[:] try: old_root_level = root.level root.setLevel(49) old_child_level = child.level child.setLevel(69) @log_capture('child') @log_capture() def test_method(l1, l2): root = getLogger() root.info('1') self.assertEqual(root.level, 1) child = getLogger('child') self.assertEqual(child.level, 1) child.info('2') l1.check( ('root', 'INFO', '1'), ('child', 'INFO', '2'), ) l2.check( ('child', 'INFO', '2'), ) test_method() self.assertEqual(root.level, 49) self.assertEqual(child.level, 69) self.assertEqual(root.handlers, before_root) self.assertEqual(child.handlers, before_child) finally: root.setLevel(old_root_level) child.setLevel(old_child_level) @log_capture() def test_decorator_returns_logcapture(self, l): # check for what we get, so we only have to write # tests in test_logcapture.py self.failUnless(isinstance(l, LogCapture)) def test_remove_existing_handlers(self): logger = getLogger() # get original handlers original = logger.handlers try: # put in a stub which will blow up if used logger.handlers = start = [object()] @log_capture() def test_method(l): logger.info('during') l.check(('root', 'INFO', 
'during')) test_method() compare(logger.handlers, start) finally: logger.handlers = original def test_clear_global_state(self): from logging import _handlers, _handlerList capture = LogCapture() capture.uninstall() self.assertFalse(capture in _handlers) self.assertFalse(capture in _handlerList) def test_no_propogate(self): logger = getLogger('child') # paranoid check compare(logger.propagate, True) @log_capture('child', propagate=False) def test_method(l): logger.info('a log message') l.check(('child', 'INFO', 'a log message')) with LogCapture() as global_log: test_method() global_log.check() compare(logger.propagate, True) def test_different_attributes(self): with LogCapture(attributes=('funcName', 'processName')) as log: getLogger().info('oh hai') log.check( ('test_different_attributes', 'MainProcess') ) def test_missing_attribute(self): with LogCapture(attributes=('msg', 'lolwut')) as log: getLogger().info('oh %s', 'hai') log.check( ('oh %s', None) ) def test_single_attribute(self): # one which isn't a string, to boot! 
with LogCapture(attributes=['msg']) as log: getLogger().info(dict(foo='bar', baz='bob')) log.check( dict(foo='bar', baz='bob'), ) def test_callable_instead_of_attribute(self): def extract_msg(record): return {k: v for (k, v) in record.msg.items() if k != 'baz'} with LogCapture(attributes=extract_msg) as log: getLogger().info(dict(foo='bar', baz='bob')) log.check( dict(foo='bar'), ) def test_msg_is_none(self): with LogCapture(attributes=('msg', 'foo')) as log: getLogger().info(None, extra=dict(foo='bar')) log.check( (None, 'bar') ) def test_normal_check(self): with LogCapture() as log: getLogger().info('oh hai') with ShouldAssert( "sequence not as expected:\n\n" "same:\n" "()\n\n" "expected:\n" "(('root', 'INFO', 'oh noez'),)\n\n" "actual:\n" "(('root', 'INFO', 'oh hai'),)" ): log.check(('root', 'INFO', 'oh noez')) def test_recursive_check(self): with LogCapture(recursive_check=True) as log: getLogger().info('oh hai') with ShouldAssert( "sequence not as expected:\n\n" "same:\n()\n\n" "expected:\n(('root', 'INFO', 'oh noez'),)\n\n" "actual:\n(('root', 'INFO', 'oh hai'),)\n\n" "While comparing [0]: sequence not as expected:\n\n" "same:\n('root', 'INFO')\n\n" "expected:\n" "('oh noez',)\n\n" "actual:\n" "('oh hai',)\n\n" "While comparing [0][2]: 'oh noez' (expected) != 'oh hai' (actual)" ): log.check(('root', 'INFO', 'oh noez')) @log_capture() @patch('testfixtures.tests.sample1.SampleClassA') def test_patch_then_log(self, a1, a2): actual = [type(c).__name__ for c in (a1, a2)] compare(actual, expected=['MagicMock', 'LogCaptureForDecorator']) @patch('testfixtures.tests.sample1.SampleClassA') @log_capture() def test_log_then_patch(self, a1, a2): actual = [type(c).__name__ for c in (a1, a2)] compare(actual, expected=['LogCaptureForDecorator', 'MagicMock']) class BaseCaptureTest(TestCase): a = 33 @log_capture() def test_logs_if_a_smaller_than_44(self, logs): logger = getLogger() if self.a < 44: logger.info('{} is smaller than 44'.format(self.a)) logs.check( ('root', 'INFO', 
'{} is smaller than 44'.format(self.a)), ) class SubclassCaptureTest(BaseCaptureTest): a = 2 testfixtures-6.18.3/testfixtures/tests/test_logcapture.py000066400000000000000000000467361412502526400240740ustar00rootroot00000000000000from __future__ import print_function from logging import getLogger, ERROR, Filter from textwrap import dedent from unittest import TestCase from warnings import catch_warnings from testfixtures.shouldraise import ShouldAssert from testfixtures.mock import Mock from testfixtures import Replacer, LogCapture, compare, Replace root = getLogger() one = getLogger('one') two = getLogger('two') child = getLogger('one.child') class DummyFilter(Filter): def filter(self, _): return True class TestLogCapture(TestCase): def test_simple(self): root.info('before') l = LogCapture() root.info('during') l.uninstall() root.info('after') assert str(l) == "root INFO\n during" def test_simple_strict(self): log_capture = LogCapture(ensure_checks_above=ERROR) root.error('during') log_capture.uninstall() with ShouldAssert("Not asserted ERROR log(s): [('root', 'ERROR', 'during')]"): log_capture.ensure_checked() def test_simple_strict_re_defaulted(self): with Replace('testfixtures.LogCapture.default_ensure_checks_above', ERROR): LogCapture.default_ensure_checks_above = ERROR log_capture = LogCapture() root.error('during') log_capture.uninstall() with ShouldAssert("Not asserted ERROR log(s): [('root', 'ERROR', 'during')]"): log_capture.ensure_checked() def test_simple_strict_asserted_by_check(self): log_capture = LogCapture(ensure_checks_above=ERROR) root.error('during') log_capture.uninstall() log_capture.check(("root", "ERROR", "during")) log_capture.ensure_checked() def test_simple_strict_asserted_by_check_present_ordered(self): log_capture = LogCapture(ensure_checks_above=ERROR) root.error('during') log_capture.uninstall() log_capture.check_present(("root", "ERROR", "during")) log_capture.ensure_checked() def 
test_simple_strict_asserted_by_check_present_unordered(self): log_capture = LogCapture(ensure_checks_above=ERROR) root.error('during') log_capture.uninstall() log_capture.check_present(("root", "ERROR", "during"), order_matters=False) log_capture.ensure_checked() def test_simple_strict_not_asserted_by_check_present(self): log_capture = LogCapture(ensure_checks_above=ERROR) root.error('before') root.error('during') log_capture.uninstall() log_capture.check_present(("root", "ERROR", "during")) with ShouldAssert("Not asserted ERROR log(s): [('root', 'ERROR', 'before')]"): log_capture.ensure_checked() def test_simple_strict_asserted_by_containment(self): log_capture = LogCapture(ensure_checks_above=ERROR) root.error('during') log_capture.uninstall() assert ("root", "ERROR", "during") in log_capture assert ("root", "INFO", "during") not in log_capture log_capture.ensure_checked() def test_simple_strict_asserted_by_mark_all_checked(self): log_capture = LogCapture(ensure_checks_above=ERROR) root.error('during') log_capture.uninstall() log_capture.mark_all_checked() log_capture.ensure_checked() def test_simple_strict_ctx(self): with ShouldAssert("Not asserted ERROR log(s): [('root', 'ERROR', 'during')]"): with LogCapture(ensure_checks_above=ERROR): root.error('during') def test_simple_strict_asserted_ctx(self): with LogCapture(ensure_checks_above=ERROR) as log_capture: root.error('during') log_capture.check(("root", "ERROR", "during")) def test_specific_logger(self): l = LogCapture('one') root.info('1') one.info('2') two.info('3') child.info('4') l.uninstall() assert str(l) == ( "one INFO\n 2\n" "one.child INFO\n 4" ) def test_multiple_loggers(self): l = LogCapture(('one.child','two')) root.info('1') one.info('2') two.info('3') child.info('4') l.uninstall() assert str(l) == ( "two INFO\n 3\n" "one.child INFO\n 4" ) def test_simple_manual_install(self): l = LogCapture(install=False) root.info('before') l.install() root.info('during') l.uninstall() root.info('after') assert 
str(l) == "root INFO\n during" def test_uninstall(self): # Lets start off with a couple of loggers: root = getLogger() child = getLogger('child') # Add a dummy filter so we can verify it is swapped out # during the capture, and swapped back in after `uninstall`. root.addFilter(DummyFilter()) # Lets also record the handlers for these loggers before # we start the test: before_root = root.handlers[:] before_child = child.handlers[:] # Lets also record the levels for the loggers: old_root_level=root.level old_child_level=child.level # Also record the filters: old_root_filters = root.filters[:] old_child_filters = child.filters[:] # Now the test: try: root.setLevel(49) child.setLevel(69) l1 = LogCapture() l2 = LogCapture('child') root = getLogger() root.info('1') child = getLogger('child') assert root.level == 1 assert child.level == 1 assert root.filters == [] assert child.filters == [] child.info('2') assert str(l1) == ( "root INFO\n 1\n" "child INFO\n 2" ) assert str(l2) == ( "child INFO\n 2" ) # Add a dummy filter to the child, # which should be removed by `uninstall`. 
child.addFilter(DummyFilter()) l2.uninstall() l1.uninstall() assert root.level == 49 assert child.level == 69 finally: root.setLevel(old_root_level) child.setLevel(old_child_level) # Now we check the handlers are as they were before # the test: assert root.handlers == before_root assert child.handlers == before_child # Also check the filters were restored: assert root.filters == old_root_filters assert child.filters == old_child_filters def test_uninstall_all(self): before_handlers_root = root.handlers[:] before_handlers_child = child.handlers[:] l1 = LogCapture() l2 = LogCapture('one.child') # We can see that the LogCaptures have changed the # handlers, removing existing ones and installing # their own: assert len(root.handlers) == 1 assert root.handlers != before_handlers_root assert len(child.handlers) == 1 assert child.handlers != before_handlers_child # Now we show the function in action: LogCapture.uninstall_all() # ...and we can see the handlers are back as # they were beefore: assert before_handlers_root == root.handlers assert before_handlers_child == child.handlers def test_two_logcaptures_on_same_logger(self): # If you create more than one LogCapture on a single # logger, the 2nd one installed will stop the first # one working! 
def test_uninstall_more_than_once(self):
    """Repeated uninstall() calls are harmless no-ops."""
    previous_level = root.level
    try:
        root.setLevel(49)
        capture = LogCapture()
        assert root.level == 1
        capture.uninstall()
        assert root.level == 49
        root.setLevel(69)
        # A second uninstall must not clobber the newer level:
        capture.uninstall()
        assert root.level == 69
    finally:
        root.setLevel(previous_level)
    # uninstall_all() remains a safe backstop even after everything
    # has already been uninstalled:
    capture.uninstall_all()
def test_no_propogate(self):
    """propagate=False keeps records out of enclosing captures."""
    logger = getLogger('child')
    # Paranoid check that propagation is on to start with:
    compare(logger.propagate, True)
    with LogCapture() as global_log:
        with LogCapture('child', propagate=False) as child_log:
            logger.info('a log message')
    # Only the non-propagating child capture saw the record:
    child_log.check(('child', 'INFO', 'a log message'))
    global_log.check()
    # Propagation is restored once the capture is gone:
    compare(logger.propagate, True)
def test_order_doesnt_matter_ok(self):
    """check_present(order_matters=False) ignores ordering and noise."""
    with LogCapture() as log:
        root.info('one')
        root.error('junk')
        root.warning('two')
        root.error('junk')
        root.error('three')
    # Expected entries listed out of order, with extra records
    # interleaved, still match:
    log.check_present(
        ('root', 'ERROR', 'three'),
        ('root', 'INFO', 'one'),
        ('root', 'WARNING', 'two'),
        order_matters=False
    )
def test_multiple_identical_expected_order_doesnt_matter_ok(self):
    """Duplicate expected entries must each be matched once."""
    with LogCapture() as log:
        root.info('one')
        root.warning('two')
        root.error('junk')
        root.warning('two')
        root.error('junk')
        root.info('one')
    # Two copies of each expected entry, order irrelevant:
    log.check_present(
        ('root', 'INFO', 'one'),
        ('root', 'INFO', 'one'),
        ('root', 'WARNING', 'two'),
        ('root', 'WARNING', 'two'),
        order_matters=False
    )
def check_repr(obj, expected):
    """Assert that repr(obj) equals *expected*, dedented and stripped
    of its trailing newline."""
    wanted = dedent(expected).rstrip('\n')
    compare(repr(obj), expected=wanted)
def test_equal_partial(self):
    """partial=True tolerates extra keys in the actual mapping."""
    comparison = MappingComparison({'a': 1}, partial=True)
    assert comparison == {'a': 1, 'b': 2}
def test_unequal_partial_keys_missing(self):
    """Even with partial=True, expected keys must be present."""
    m = MappingComparison({'a': 1, 'b': 2}, partial=True)
    assert m != {'a': 1}
    # NOTE(review): newlines in this expected text were reconstructed
    # from a whitespace-collapsed source — confirm against the real
    # MappingComparison failure repr.
    check_repr(m, expected='''
    same: ['a']

    in expected but not actual:
    'b': 2
    ''')
def test_boolean_return(self):
    """Comparison operators must yield a real bool, not a truthy proxy."""
    comparison = MappingComparison({'k': 'v'})
    outcome = comparison != {'k': 'v'}
    assert isinstance(outcome, bool)
def test_compare_strips(self):
    """By default compare() ignores leading/trailing whitespace."""
    with OutputCapture() as output:
        print(' Bar! ')
    output.compare('Bar!')
def test_original_restore(self):
    """stdout/stderr are replaced during capture and restored after."""
    orig_out, orig_err = sys.stdout, sys.stderr
    with OutputCapture():
        # Inside the block the streams have been swapped out:
        self.assertFalse(sys.stdout is orig_out)
        self.assertFalse(sys.stderr is orig_err)
    # ...and put back on exit:
    self.assertTrue(sys.stdout is orig_out)
    self.assertTrue(sys.stderr is orig_err)
def test_command_min_args(self):
    """set_command() with only a command gives empty output, pid 1234
    and a zero return code."""
    Popen = MockPopen()
    Popen.set_command('a command')
    proc = Popen('a command', stdout=PIPE, stderr=PIPE)
    # Started but not finished: pid is set, return code is not.
    compare(proc.pid, 1234)
    compare(None, proc.returncode)
    stdout_data, stderr_data = proc.communicate()
    compare(stdout_data, b'')
    compare(stderr_data, b'')
    compare(proc.returncode, 0)
    # The mock recorded both the construction and the communicate():
    compare([
        call.Popen('a command', stderr=-1, stdout=-1),
        call.Popen_instance.communicate(),
    ], Popen.mock.method_calls)
def test_command_is_sequence(self):
    """Commands supplied as a list are recorded verbatim."""
    Popen = MockPopen()
    Popen.set_command('a command')
    proc = Popen(['a', 'command'], stdout=PIPE, stderr=PIPE)
    compare(proc.wait(), 0)
    compare([
        call.Popen(['a', 'command'], stderr=-1, stdout=-1),
        call.Popen_instance.wait(),
    ], Popen.mock.method_calls)
def test_read_from_stderr(self):
    """Configured stderr bytes are readable from the stderr pipe."""
    Popen = MockPopen()
    Popen.set_command('a command', stderr=b'foo')
    proc = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
    # The pipe behaves like a real file: fileno() gives an int.
    self.assertTrue(isinstance(proc.stdout.fileno(), int))
    compare(proc.stderr.read(), b'foo')
    compare([
        call.Popen('a command', shell=True, stderr=-1, stdout=-1),
    ], Popen.mock.method_calls)
def test_communicate_universal_newlines(self):
    """universal_newlines=True makes communicate() return text."""
    Popen = MockPopen()
    Popen.set_command('a command', stdout=b'foo', stderr=b'bar')
    proc = Popen('a command', stdout=PIPE, stderr=PIPE,
                 universal_newlines=True)
    result = proc.communicate()
    compare(result, expected=(u'foo', u'bar'))
def test_wait_and_return_code(self):
    """wait() returns the configured code and sets returncode."""
    Popen = MockPopen()
    Popen.set_command('a command', returncode=3)
    proc = Popen('a command')
    # Nothing waited on yet:
    compare(proc.returncode, None)
    compare(proc.wait(), 3)
    compare(proc.returncode, 3)
    compare([
        call.Popen('a command'),
        call.Popen_instance.wait(),
    ], Popen.mock.method_calls)
def test_terminate(self):
    """terminate() on the mock process is recorded in the call list."""
    Popen = MockPopen()
    Popen.set_command('a command')
    proc = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
    proc.terminate()
    compare([
        call.Popen('a command', shell=True, stderr=-1, stdout=-1),
        call.Popen_instance.terminate(),
    ], Popen.mock.method_calls)
def test_command_not_specified(self):
    """Running an unconfigured command raises a helpful KeyError."""
    Popen = MockPopen()
    with ShouldRaise(KeyError(
        "Nothing specified for command 'a command'"
    )):
        Popen('a command', stdout=PIPE, stderr=PIPE, shell=True)
test_invalid_method_or_attr(self): Popen = MockPopen() Popen.set_command('command') process = Popen('command') with ShouldRaise(AttributeError): process.foo() def test_invalid_attribute(self): Popen = MockPopen() Popen.set_command('command') process = Popen('command') with ShouldRaise(AttributeError): process.foo def test_invalid_communicate_call(self): Popen = MockPopen() Popen.set_command('bar') process = Popen('bar') with ShouldRaise(TypeError( "communicate() got an unexpected keyword argument 'foo'" )): process.communicate(foo='bar') def test_invalid_wait_call(self): Popen = MockPopen() Popen.set_command('bar') process = Popen('bar') with ShouldRaise(TypeError( "wait() got an unexpected keyword argument 'foo'" )): process.wait(foo='bar') def test_invalid_send_signal(self): Popen = MockPopen() Popen.set_command('bar') process = Popen('bar') with ShouldRaise(TypeError( "send_signal() got an unexpected keyword argument 'foo'" )): process.send_signal(foo='bar') def test_invalid_terminate(self): Popen = MockPopen() Popen.set_command('bar') process = Popen('bar') with ShouldRaise(TypeError( "terminate() got an unexpected keyword argument 'foo'" )): process.terminate(foo='bar') def test_invalid_kill(self): Popen = MockPopen() Popen.set_command('bar') process = Popen('bar') if PY2: text = 'kill() takes exactly 1 argument (2 given)' else: text = 'kill() takes 1 positional argument but 2 were given' with ShouldRaise(TypeError(text)): process.kill('moo') def test_invalid_poll(self): Popen = MockPopen() Popen.set_command('bar') process = Popen('bar') if PY2: text = 'poll() takes exactly 1 argument (2 given)' else: text = 'poll() takes 1 positional argument but 2 were given' with ShouldRaise(TypeError(text)): process.poll('moo') def test_non_pipe(self): # setup Popen = MockPopen() Popen.set_command('a command') # usage process = Popen('a command') # checks compare(process.stdout, expected=None) compare(process.stderr, expected=None) out, err = process.communicate() # test 
the rest compare(out, expected=None) compare(err, expected=None) # test call list compare([ call.Popen('a command'), call.Popen_instance.communicate(), ], Popen.mock.method_calls) def test_use_as_context_manager(self): # setup Popen = MockPopen() Popen.set_command('a command') if PY2: process = Popen('a command') with ShouldRaise(AttributeError): process.__enter__ with ShouldRaise(AttributeError): process.__exit__ else: # usage with Popen('a command', stdout=PIPE, stderr=PIPE) as process: # process started, no return code compare(process.pid, 1234) compare(None, process.returncode) out, err = process.communicate() # test the rest compare(out, b'') compare(err, b'') compare(process.returncode, 0) compare(process.stdout.closed, expected=True) compare(process.stderr.closed, expected=True) # test call list compare([ call.Popen('a command', stderr=-1, stdout=-1), call.Popen_instance.communicate(), call.Popen_instance.wait(), ], Popen.mock.method_calls) def test_start_new_session(self): # setup Popen = MockPopen() Popen.set_command('a command') # usage Popen('a command', start_new_session=True) # test call list compare([ call.Popen('a command', start_new_session=True), ], Popen.mock.method_calls) def test_simultaneous_processes(self): Popen = MockPopen() Popen.set_command('a command', b'a', returncode=1) Popen.set_command('b command', b'b', returncode=2) process_a = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True) process_b = Popen(['b', 'command'], stdout=PIPE, stderr=PIPE, shell=True) compare(process_a.wait(), expected=1) compare(process_b.wait(), expected=2) a_call = call.Popen('a command', stdout=PIPE, stderr=PIPE, shell=True) b_call = call.Popen(['b', 'command'], stdout=PIPE, stderr=PIPE, shell=True) compare(Popen.all_calls, expected=[ a_call, b_call, a_call.wait(), b_call.wait(), ]) compare(process_a.mock.method_calls, expected=[ call.wait() ]) compare(process_b.mock.method_calls, expected=[ call.wait() ]) def test_pass_executable(self): Popen = MockPopen() 
Popen.set_command('a command', b'a', returncode=1) Popen('a command', executable='/foo/bar') compare(Popen.all_calls, expected=[ call.Popen('a command', executable='/foo/bar') ]) def test_set_command_with_list(self): Popen = MockPopen() Popen.set_command(['a', 'command']) Popen(['a', 'command'], stdout=PIPE, stderr=PIPE) compare([call.Popen(['a', 'command'], stderr=-1, stdout=-1)], actual=Popen.all_calls) class IntegrationTests(TestCase): def setUp(self): self.popen = MockPopen() replacer = Replacer() replacer.replace('testfixtures.tests.test_popen.subprocess.Popen', self.popen) self.addCleanup(replacer.restore) def test_command_called_with_check_call_check_returncode(self): self.popen.set_command('ls') compare(0, subprocess.check_call(['ls'])) def test_command_called_with_check_output_check_stdout_returned(self): self.popen.set_command('ls', stdout=b'abc') compare(b'abc', subprocess.check_output(['ls'])) def test_command_called_with_check_output_stderr_to_stdout_check_returned(self): self.popen.set_command('ls', stderr=b'xyz') compare(b'xyz', subprocess.check_output(['ls'], stderr=STDOUT)) def test_command_called_with_check_call_failing_command_check_exception(self): self.popen.set_command('ls', returncode=1) with self.assertRaises(subprocess.CalledProcessError): subprocess.check_output(['ls']) testfixtures-6.18.3/testfixtures/tests/test_popen_docs.py000066400000000000000000000154221412502526400240440ustar00rootroot00000000000000# NB: This file is used in the documentation, if you make changes, ensure # you update the line numbers in popen.txt! 
from subprocess import Popen, PIPE def my_func(): process = Popen(['svn', 'ls', '-R', 'foo'], stdout=PIPE, stderr=PIPE) out, err = process.communicate() if process.returncode: raise RuntimeError('something bad happened') return out dotted_path = 'testfixtures.tests.test_popen_docs.Popen' from unittest import TestCase from testfixtures.mock import call from testfixtures import Replacer, ShouldRaise, compare from testfixtures.popen import MockPopen, PopenBehaviour class TestMyFunc(TestCase): def setUp(self): self.Popen = MockPopen() self.r = Replacer() self.r.replace(dotted_path, self.Popen) self.addCleanup(self.r.restore) def test_example(self): # set up self.Popen.set_command('svn ls -R foo', stdout=b'o', stderr=b'e') # testing of results compare(my_func(), b'o') # testing calls were in the right order and with the correct parameters: process = call.Popen(['svn', 'ls', '-R', 'foo'], stderr=PIPE, stdout=PIPE) compare(Popen.all_calls, expected=[ process, process.communicate() ]) def test_example_bad_returncode(self): # set up Popen.set_command('svn ls -R foo', stdout=b'o', stderr=b'e', returncode=1) # testing of error with ShouldRaise(RuntimeError('something bad happened')): my_func() def test_communicate_with_input(self): # setup Popen = MockPopen() Popen.set_command('a command') # usage process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True) out, err = process.communicate('foo') # test call list compare(Popen.all_calls, expected=[ process.root_call, process.root_call.communicate('foo'), ]) def test_read_from_stdout_and_stderr(self): # setup Popen = MockPopen() Popen.set_command('a command', stdout=b'foo', stderr=b'bar') # usage process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True) compare(process.stdout.read(), b'foo') compare(process.stderr.read(), b'bar') def test_write_to_stdin(self): # setup Popen = MockPopen() Popen.set_command('a command') # usage process = Popen('a command', stdin=PIPE, shell=True) process.stdin.write('some text') 
process.stdin.close() # test call list compare(Popen.all_calls, expected=[ process.root_call, process.root_call.stdin.write('some text'), process.root_call.stdin.close(), ]) def test_wait_and_return_code(self): # setup Popen = MockPopen() Popen.set_command('a command', returncode=3) # usage process = Popen('a command') compare(process.returncode, None) # result checking compare(process.wait(), 3) compare(process.returncode, 3) # test call list compare(Popen.all_calls, expected=[ call.Popen('a command'), call.Popen('a command').wait(), ]) def test_send_signal(self): # setup Popen = MockPopen() Popen.set_command('a command') # usage process = Popen('a command', stdout=PIPE, stderr=PIPE, shell=True) process.send_signal(0) # result checking compare(Popen.all_calls, expected=[ process.root_call, process.root_call.send_signal(0), ]) def test_poll_until_result(self): # setup Popen = MockPopen() Popen.set_command('a command', returncode=3, poll_count=2) # example usage process = Popen('a command') while process.poll() is None: # you'd probably have a sleep here, or go off and # do some other work. 
pass # result checking compare(process.returncode, 3) compare(Popen.all_calls, expected=[ process.root_call, process.root_call.poll(), process.root_call.poll(), process.root_call.poll(), ]) def test_default_behaviour(self): # set up self.Popen.set_default(stdout=b'o', stderr=b'e') # testing of results compare(my_func(), b'o') # testing calls were in the right order and with the correct parameters: root_call = call.Popen(['svn', 'ls', '-R', 'foo'], stderr=PIPE, stdout=PIPE) compare(Popen.all_calls, expected=[ root_call, root_call.communicate() ]) def test_multiple_responses(self): # set up behaviours = [ PopenBehaviour(stderr=b'e', returncode=1), PopenBehaviour(stdout=b'o'), ] def behaviour(command, stdin): return behaviours.pop(0) self.Popen.set_command('svn ls -R foo', behaviour=behaviour) # testing of error: with ShouldRaise(RuntimeError('something bad happened')): my_func() # testing of second call: compare(my_func(), b'o') def test_count_down(self): # set up self.Popen.set_command('svn ls -R foo', behaviour=CustomBehaviour()) # testing of error: with ShouldRaise(RuntimeError('something bad happened')): my_func() # testing of second call: compare(my_func(), b'o') def test_multiple_processes(self): # set up self.Popen.set_command('process --batch=0', stdout=b'42') self.Popen.set_command('process --batch=1', stdout=b'13') # testing of results compare(process_in_batches(2), expected=55) # testing of process management: p1 = call.Popen('process --batch=0', shell=True, stderr=PIPE, stdout=PIPE) p2 = call.Popen('process --batch=1', shell=True, stderr=PIPE, stdout=PIPE) compare(Popen.all_calls, expected=[ p1, p2, p1.communicate(), p2.communicate(), ]) class CustomBehaviour(object): def __init__(self, fail_count=1): self.fail_count = fail_count def __call__(self, command, stdin): while self.fail_count > 0: self.fail_count -= 1 return PopenBehaviour(stderr=b'e', returncode=1) return PopenBehaviour(stdout=b'o') def process_in_batches(n): processes = [] for i in range(n): 
processes.append(Popen('process --batch='+str(i), stdout=PIPE, stderr=PIPE, shell=True)) total = 0 for process in processes: out, err = process.communicate() total += int(out) return total testfixtures-6.18.3/testfixtures/tests/test_rangecomparison.py000066400000000000000000000133211412502526400250760ustar00rootroot00000000000000from decimal import Decimal from testfixtures import RangeComparison as R, ShouldRaise, compare from unittest import TestCase from ..compat import PY2, PY3 class Tests(TestCase): def test_equal_yes_rhs(self): self.assertTrue(5 == R(2, 5)) def test_equal_yes_lhs(self): self.assertTrue(R(2, 5) == 2) def test_equal_no_rhs(self): self.assertFalse(5 == R(2, 4)) def test_equal_no_lhs(self): self.assertFalse(R(2, 3) == 5) def test_not_equal_yes_rhs(self): self.assertTrue(5 != R(2, 2)) def test_not_equal_yes_lhs(self): self.assertTrue(R(2, 4) != 1) def test_not_equal_no_rhs(self): self.assertFalse(5 != R(-10, 10)) def test_not_equal_no_lhs(self): self.assertFalse(R(2, 5) != 2) def test_equal_in_sequence_rhs(self): self.assertEqual((1, 2, 5), (1, 2, R(2, 5))) def test_equal_in_sequence_lhs(self): self.assertEqual((1, 2, R(2, 5)), (1, 2, 5)) def test_not_equal_in_sequence_rhs(self): self.assertNotEqual((1, 2, 5), (1, 2, R(2, 4))) def test_not_equal_in_sequence_lhs(self): self.assertNotEqual((1, 2, R(2, 4)), (1, 2, 5)) def test_not_numeric_rhs(self): if PY2: self.assertFalse('abc' == R(2, 5)) self.assertFalse({} == R(2, 5)) self.assertFalse([] == R(2, 5)) else: with ShouldRaise(TypeError): 'abc' == R(2, 5) with ShouldRaise(TypeError): {} == R(2, 5) with ShouldRaise(TypeError): [] == R(2, 5) def test_not_numeric_lhs(self): if PY2: self.assertFalse(R(2, 5) == 'abc') self.assertFalse(R(2, 5) == {}) self.assertFalse(R(2, 5) == []) else: with ShouldRaise(TypeError): R(2, 5) == 'abc' with ShouldRaise(TypeError): R(2, 5) == {} with ShouldRaise(TypeError): R(2, 5) == [] def test_repr(self): compare('', repr(R(2, 5))) def test_str(self): compare('', str(R(2, 
5))) def test_str_negative(self): if PY3: expected = '' else: expected = '' compare(expected, repr(R(2, 5))) def test_equal_yes_decimal_lhs(self): self.assertTrue(R(2, 5) == Decimal(3)) def test_equal_yes_decimal_rhs(self): self.assertTrue(Decimal(3) == R(2, 5)) def test_equal_no_decimal_lhs(self): self.assertFalse(R(2, 5) == Decimal(1.0)) def test_equal_no_decimal_rhs(self): self.assertFalse(Decimal(1.0) == R(2, 5)) def test_equal_yes_float_lhs(self): self.assertTrue(R(2, 5) == 3.0) def test_equal_yes_float_rhs(self): self.assertTrue(3.0 == R(2, 5)) def test_equal_no_float_lhs(self): self.assertFalse(R(2, 5) == 1.0) def test_equal_no_float_rhs(self): self.assertFalse(1.0 == R(2, 5)) def test_equal_yes_decimal_in_range_lhs(self): self.assertTrue(R(Decimal(1), 5) == 3) self.assertTrue(R(1, Decimal(5)) == 3) self.assertTrue(R(Decimal(1), Decimal(5)) == 3) def test_equal_yes_decimal_in_range_rhs(self): self.assertTrue(3 == R(Decimal(1), 5)) self.assertTrue(3 == R(1, Decimal(5))) self.assertTrue(3 == R(Decimal(1), Decimal(5))) def test_equal_no_decimal_in_range_lhs(self): self.assertFalse(R(Decimal(1), 5) == 6) self.assertFalse(R(1, Decimal(5)) == 6) self.assertFalse(R(Decimal(1), Decimal(5)) == 6) def test_equal_no_decimal_in_range_rhs(self): self.assertFalse(6 == R(Decimal(1), 5)) self.assertFalse(6 == R(1, Decimal(5))) self.assertFalse(6 == R(Decimal(1), Decimal(5))) def test_equal_yes_float_in_range_lhs(self): self.assertTrue(R(1.0, 5) == 3) self.assertTrue(R(1, 5.0) == 3) self.assertTrue(R(1.0, 5.0) == 3) def test_equal_yes_float_in_range_rhs(self): self.assertTrue(3 == R(1.0, 5)) self.assertTrue(3 == R(1, 5.0)) self.assertTrue(3 == R(1.0, 5.0)) def test_equal_no_float_in_range_lhs(self): self.assertFalse(R(1.0, 5) == 6) self.assertFalse(R(1, 5.0) == 6) self.assertFalse(R(1.0, 5.0) == 6) def test_equal_no_float_in_range_rhs(self): self.assertFalse(6 == R(1.0, 5)) self.assertFalse(6 == R(1, 5.0)) self.assertFalse(6 == R(1.0, 5.0)) def 
test_equal_yes_negative_lhs(self): self.assertTrue(R(-5, 5) == -3) self.assertTrue(R(-10, -5) == -7) def test_equal_yes_negative_rhs(self): self.assertTrue(-2 == R(-5, 5)) self.assertTrue(-7 == R(-10, -5)) def test_equal_no_negative_lhs(self): self.assertFalse(R(-5, 5) == -10) self.assertFalse(R(-10, -5) == -3) def test_equal_no_negative_rhs(self): self.assertFalse(-10 == R(-5, 5)) self.assertFalse(-30 == R(-10, -5)) def test_equal_yes_no_range_lhs(self): self.assertTrue(R(0, 0) == 0) self.assertTrue(R(2, 2) == 2) self.assertTrue(R(-1, -1) == -1) def test_equal_yes_no_range_rhs(self): self.assertTrue(0 == R(0, 0)) self.assertTrue(2 == R(2, 2)) self.assertTrue(-1 == R(-1, -1)) def test_equal_no_no_range_lhs(self): self.assertFalse(R(0, 0) == 1) self.assertFalse(R(2, 2) == 1) self.assertFalse(R(-1, -1) == 11) def test_equal_no_no_range_rhs(self): self.assertFalse(1 == R(0, 0)) self.assertFalse(1 == R(2, 2)) self.assertFalse(1 == R(-1, -1))testfixtures-6.18.3/testfixtures/tests/test_replace.py000066400000000000000000000326641412502526400233350ustar00rootroot00000000000000from testfixtures import ( Replacer, Replace, ShouldRaise, TempDirectory, replace, compare, not_there, ) from unittest import TestCase import os from testfixtures.tests import sample1 from testfixtures.tests import sample2 from ..compat import PY3 from warnings import catch_warnings class TestReplace(TestCase): def test_function(self): def test_z(): return 'replacement z' compare(sample1.z(), 'original z') @replace('testfixtures.tests.sample1.z', test_z) def test_something(): compare(sample1.z(), 'replacement z') compare(sample1.z(), 'original z') test_something() compare(sample1.z(), 'original z') def test_class(self): OriginalX = sample1.X class ReplacementX(sample1.X): pass self.failIf(OriginalX is ReplacementX) self.failUnless(isinstance(sample1.X(), OriginalX)) @replace('testfixtures.tests.sample1.X', ReplacementX) def test_something(): self.failIf(OriginalX is ReplacementX) 
self.failUnless(isinstance(sample1.X(), ReplacementX)) self.failIf(OriginalX is ReplacementX) self.failUnless(isinstance(sample1.X(), OriginalX)) test_something() self.failIf(OriginalX is ReplacementX) self.failUnless(isinstance(sample1.X(), OriginalX)) def test_method(self): def test_y(self): return self compare(sample1.X().y(), 'original y') @replace('testfixtures.tests.sample1.X.y', test_y) def test_something(): self.failUnless(isinstance(sample1.X().y(), sample1.X)) compare(sample1.X().y(), 'original y') test_something() compare(sample1.X().y(), 'original y') def test_class_method(self): def rMethod(cls): return (cls, 1) compare(sample1.X().aMethod(), sample1.X) @replace('testfixtures.tests.sample1.X.aMethod', rMethod) def test_something(r): compare(r, rMethod) compare(sample1.X().aMethod(), (sample1.X, 1)) compare(sample1.X().aMethod(), sample1.X) test_something() compare(sample1.X().aMethod(), sample1.X) def test_multiple_replace(self): def test_y(self): return 'test y' def test_z(): return 'test z' compare(sample1.z(), 'original z') compare(sample1.X().y(), 'original y') @replace('testfixtures.tests.sample1.z', test_z) @replace('testfixtures.tests.sample1.X.y', test_y) def test_something(passed_test_y, passed_test_z): compare(test_z, passed_test_z) compare(test_y, passed_test_y) compare(sample1.z(), 'test z') compare(sample1.X().y(), 'test y') compare(sample1.z(), 'original z') compare(sample1.X().y(), 'original y') test_something() compare(sample1.z(), 'original z') compare(sample1.X().y(), 'original y') def test_gotcha(self): # Just because you replace an object in one context, # doesn't meant that it's replaced in all contexts! 
def test_z(): return 'test z' compare(sample1.z(), 'original z') compare(sample2.z(), 'original z') @replace('testfixtures.tests.sample1.z', test_z) def test_something(): compare(sample1.z(), 'test z') compare(sample2.z(), 'original z') compare(sample1.z(), 'original z') compare(sample2.z(), 'original z') test_something() compare(sample1.z(), 'original z') compare(sample2.z(), 'original z') def test_raises(self): def test_z(): return 'replacement z' compare(sample1.z(), 'original z') @replace('testfixtures.tests.sample1.z', test_z) def test_something(): compare(sample1.z(), 'replacement z') raise Exception() compare(sample1.z(), 'original z') with ShouldRaise(): test_something() compare(sample1.z(), 'original z') def test_want_replacement(self): o = object() @replace('testfixtures.tests.sample1.z', o) def test_something(r): self.failUnless(r is o) self.failUnless(sample1.z is o) test_something() def test_not_there(self): o = object() @replace('testfixtures.tests.sample1.bad', o) def test_something(r): pass # pragma: no cover with ShouldRaise(AttributeError("Original 'bad' not found")): test_something() def test_not_there_ok(self): o = object() @replace('testfixtures.tests.sample1.bad', o, strict=False) def test_something(r): self.failUnless(r is o) self.failUnless(sample1.bad is o) test_something() def test_replace_dict(self): from testfixtures.tests.sample1 import someDict original = someDict['key'] replacement = object() @replace('testfixtures.tests.sample1.someDict.key', replacement) def test_something(obj): self.failUnless(obj is replacement) self.failUnless(someDict['key'] is replacement) test_something() self.failUnless(someDict['key'] is original) def test_replace_delattr(self): from testfixtures.tests import sample1 @replace('testfixtures.tests.sample1.someDict', not_there) def test_something(obj): self.failIf(hasattr(sample1, 'someDict')) test_something() self.assertEqual(sample1.someDict, {'complex_key': [1, 2, 3], 'key': 'value'}) def 
test_replace_delattr_not_there(self): @replace('testfixtures.tests.sample1.foo', not_there) def test_something(obj): pass # pragma: no cover with ShouldRaise(AttributeError("Original 'foo' not found")): test_something() def test_replace_delattr_not_there_not_strict(self): from testfixtures.tests import sample1 @replace('testfixtures.tests.sample1.foo', not_there, strict=False) def test_something(obj): self.failIf(hasattr(sample1, 'foo')) test_something() def test_replace_delattr_not_there_restored(self): from testfixtures.tests import sample1 @replace('testfixtures.tests.sample1.foo', not_there, strict=False) def test_something(obj): sample1.foo = 'bar' test_something() self.failIf(hasattr(sample1, 'foo')) def test_replace_delattr_cant_remove(self): with Replacer() as r: with ShouldRaise(TypeError( "can't set attributes of " "built-in/extension type 'datetime.datetime'" )): r.replace('datetime.datetime.today', not_there) def test_replace_delattr_cant_remove_not_strict(self): with Replacer() as r: with ShouldRaise(TypeError( "can't set attributes of " "built-in/extension type 'datetime.datetime'" )): r.replace('datetime.datetime.today', not_there, strict=False) def test_replace_dict_remove_key(self): from testfixtures.tests.sample1 import someDict @replace('testfixtures.tests.sample1.someDict.key', not_there) def test_something(obj): self.failIf('key' in someDict) test_something() self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key']) def test_replace_dict_remove_key_not_there(self): from testfixtures.tests.sample1 import someDict @replace('testfixtures.tests.sample1.someDict.badkey', not_there) def test_something(obj): self.failIf('badkey' in someDict) # pragma: no cover with ShouldRaise(AttributeError("Original 'badkey' not found")): test_something() self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key']) def test_replace_dict_remove_key_not_there_not_strict(self): from testfixtures.tests.sample1 import someDict 
@replace('testfixtures.tests.sample1.someDict.badkey', not_there, strict=False) def test_something(obj): self.failIf('badkey' in someDict) test_something() self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key']) def test_replace_dict_ensure_key_not_there_restored(self): from testfixtures.tests.sample1 import someDict @replace('testfixtures.tests.sample1.someDict.badkey', not_there, strict=False) def test_something(obj): someDict['badkey'] = 'some test value' test_something() self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key']) def test_replace_dict_not_there(self): from testfixtures.tests.sample1 import someDict replacement = object() @replace('testfixtures.tests.sample1.someDict.key2', replacement, strict=False) def test_something(obj): self.failUnless(obj is replacement) self.failUnless(someDict['key2'] is replacement) test_something() self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key']) def test_replace_dict_not_there_empty_string(self): from testfixtures.tests.sample1 import someDict @replace('testfixtures.tests.sample1.someDict.key2', '', strict=False) def test_something(): self.assertEqual(someDict['key2'], '') test_something() self.assertEqual(sorted(someDict.keys()), ['complex_key', 'key']) def test_replace_complex(self): from testfixtures.tests.sample1 import someDict original = someDict['complex_key'][1] replacement = object() @replace('testfixtures.tests.sample1.someDict.complex_key.1', replacement) def test_something(obj): self.failUnless(obj is replacement) self.assertEqual(someDict['complex_key'], [1, obj, 3]) test_something() self.assertEqual(someDict['complex_key'], [1, 2, 3]) self.failUnless(original is someDict['complex_key'][1]) def test_replacer_del(self): r = Replacer() r.replace('testfixtures.tests.sample1.left_behind', object(), strict=False) with catch_warnings(record=True) as w: del r self.assertTrue(len(w), 1) compare(str(w[0].message), "Replacer deleted without being restored, originals left:" " 
{'testfixtures.tests.sample1.left_behind': }") def test_multiple_replaces(self): orig = os.path.sep with Replacer() as r: r.replace('os.path.sep', '$') compare(os.path.sep, '$') r.replace('os.path.sep', '=') compare(os.path.sep, '=') compare(orig, os.path.sep) def test_sub_module_import(self): with TempDirectory() as dir: dir.write('module/__init__.py', b'') dir.write('module/submodule.py', b'def foo(): return "foo"') with Replacer() as r: r.replace('sys.path', [dir.path]) def bar(): return "bar" # now test r.replace('module.submodule.foo', bar) from module.submodule import foo compare(foo(), "bar") def test_staticmethod(self): compare(sample1.X.bMethod(), 2) with Replacer() as r: r.replace('testfixtures.tests.sample1.X.bMethod', lambda: 1) compare(sample1.X.bMethod(), 1) compare(sample1.X.bMethod(), 2) def test_use_as_cleanup(self): def test_z(): return 'replacement z' compare(sample1.z(), 'original z') replace = Replacer() compare(sample1.z(), 'original z') replace('testfixtures.tests.sample1.z', test_z) cleanup = replace.restore try: compare(sample1.z(), 'replacement z') finally: cleanup() compare(sample1.z(), 'original z') def test_replace_context_manager(self): def test_z(): return 'replacement z' compare(sample1.z(), 'original z') with Replace('testfixtures.tests.sample1.z', test_z) as z: compare(z(), 'replacement z') compare(sample1.z(), 'replacement z') compare(sample1.z(), 'original z') def test_multiple_context_managers(self): def test_y(self): return 'test y' def test_z(): return 'test z' compare(sample1.z(), 'original z') compare(sample1.X().y(), 'original y') with Replacer() as replace: z = replace('testfixtures.tests.sample1.z', test_z) y = replace('testfixtures.tests.sample1.X.y', test_y) compare(z(), 'test z') if PY3: compare(y, sample1.X.y) compare(sample1.X().y(), 'test y') compare(sample1.z(), 'test z') compare(sample1.X().y(), 'test y') compare(sample1.z(), 'original z') compare(sample1.X().y(), 'original y') def 
test_context_manager_not_strict(self): def test_z(): return 'replacement z' with Replace('testfixtures.tests.sample1.foo', test_z, strict=False): compare(sample1.foo(), 'replacement z') testfixtures-6.18.3/testfixtures/tests/test_replacer.py000066400000000000000000000075021412502526400235100ustar00rootroot00000000000000from unittest import TestCase from testfixtures import Replacer, ShouldRaise class TestReplacer(TestCase): def test_function(self): from testfixtures.tests import sample1 assert sample1.z() == 'original z' def test_z(): return 'replacement z' r = Replacer() r.replace('testfixtures.tests.sample1.z',test_z) assert sample1.z() == 'replacement z' r.restore() assert sample1.z() == 'original z' def test_class(self): from testfixtures.tests import sample1 x = sample1.X() assert x.__class__.__name__ == 'X' class XReplacement(sample1.X): pass r = Replacer() r.replace('testfixtures.tests.sample1.X', XReplacement) x = sample1.X() assert x.__class__.__name__ == 'XReplacement' assert sample1.X().y() == 'original y' r.restore() x = sample1.X() assert x.__class__.__name__ == 'X' def test_method(self): from testfixtures.tests import sample1 assert sample1.X().y() == 'original y' def test_y(self): return 'replacement y' r = Replacer() r.replace('testfixtures.tests.sample1.X.y',test_y) assert sample1.X().y()[:38] == 'replacement y' r.restore() assert sample1.X().y() == 'original y' def test_class_method(self): from testfixtures.tests import sample1 c = sample1.X assert sample1.X.aMethod() is c def rMethod(cls): return cls, 1 r = Replacer() r.replace('testfixtures.tests.sample1.X.aMethod',rMethod) sample1.X.aMethod() assert sample1.X.aMethod() == (c, 1) r.restore() sample1.X.aMethod() assert sample1.X.aMethod() is c def test_multiple_replace(self): from testfixtures.tests import sample1 assert sample1.z() == 'original z' assert sample1.X().y() == 'original y' def test_y(self): return self.__class__.__name__ def test_z(): return 'replacement z' r = Replacer() 
r.replace('testfixtures.tests.sample1.z',test_z) r.replace('testfixtures.tests.sample1.X.y',test_y) assert sample1.z() == 'replacement z' assert sample1.X().y() == 'X' r.restore() assert sample1.z() == 'original z' assert sample1.X().y() == 'original y' def test_gotcha(self): # Just because you replace an object in one context: from testfixtures.tests import sample1 from testfixtures.tests import sample2 assert sample1.z() == 'original z' def test_z(): return 'replacement z' r = Replacer() r.replace('testfixtures.tests.sample1.z',test_z) assert sample1.z() == 'replacement z' # Doesn't meant that it's replaced in all contexts: assert sample2.z() == 'original z' r.restore() def test_remove_called_twice(self): from testfixtures.tests import sample1 def test_z(): pass r = Replacer() r.replace('testfixtures.tests.sample1.z',test_z) r.restore() assert sample1.z() == 'original z' r.restore() assert sample1.z() == 'original z' def test_with_statement(self): from testfixtures.tests import sample1 assert sample1.z() == 'original z' def test_z(): return 'replacement z' with Replacer() as r: r.replace('testfixtures.tests.sample1.z',test_z) assert sample1.z() == 'replacement z' assert sample1.z() == 'original z' def test_not_there(self): def test_bad(): pass with Replacer() as r: with ShouldRaise(AttributeError("Original 'bad' not found")): r.replace('testfixtures.tests.sample1.bad', test_bad) testfixtures-6.18.3/testfixtures/tests/test_roundcomparison.py000066400000000000000000000117441412502526400251400ustar00rootroot00000000000000from decimal import Decimal from testfixtures import RoundComparison as R, compare, ShouldRaise from unittest import TestCase from ..compat import PY2, PY3 class Tests(TestCase): def test_equal_yes_rhs(self): self.assertTrue(0.123457 == R(0.123456, 5)) def test_equal_yes_lhs(self): self.assertTrue(R(0.123456, 5) == 0.123457) def test_equal_no_rhs(self): self.assertFalse(0.123453 == R(0.123456, 5)) def test_equal_no_lhs(self): 
self.assertFalse(R(0.123456, 5) == 0.123453) def test_not_equal_yes_rhs(self): self.assertFalse(0.123457 != R(0.123456, 5)) def test_not_equal_yes_lhs(self): self.assertFalse(R(0.123456, 5) != 0.123457) def test_not_equal_no_rhs(self): self.assertTrue(0.123453 != R(0.123456, 5)) def test_not_equal_no_lhs(self): self.assertTrue(R(0.123456, 5) != 0.123453) def test_equal_in_sequence_rhs(self): self.assertEqual((1, 2, 0.123457), (1, 2, R(0.123456, 5))) def test_equal_in_sequence_lhs(self): self.assertEqual((1, 2, R(0.123456, 5)), (1, 2, 0.123457)) def test_not_equal_in_sequence_rhs(self): self.assertNotEqual((1, 2, 0.1236), (1, 2, R(0.123456, 5))) def test_not_equal_in_sequence_lhs(self): self.assertNotEqual((1, 2, R(0.123456, 5)), (1, 2, 0.1236)) def test_not_numeric_rhs(self): with ShouldRaise(TypeError): 'abc' == R(0.123456, 5) def test_not_numeric_lhs(self): with ShouldRaise(TypeError): R(0.123456, 5) == 'abc' def test_repr(self): compare('', repr(R(0.123456, 5))) def test_str(self): compare('', repr(R(0.123456, 5))) def test_str_negative(self): if PY3: expected = '' else: expected = '' compare(expected, repr(R(123456, -2))) TYPE_ERROR_DECIMAL = TypeError( "Cannot compare with " ) def test_equal_yes_decimal_to_float_rhs(self): with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2): self.assertTrue(Decimal("0.123457") == R(0.123456, 5)) def test_equal_yes_decimal_to_float_lhs(self): with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2): self.assertTrue(R(0.123456, 5) == Decimal("0.123457")) def test_equal_no_decimal_to_float_rhs(self): with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2): self.assertFalse(Decimal("0.123453") == R(0.123456, 5)) def test_equal_no_decimal_to_float_lhs(self): with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2): self.assertFalse(R(0.123456, 5) == Decimal("0.123453")) TYPE_ERROR_FLOAT = TypeError( "Cannot compare with " ) def test_equal_yes_float_to_decimal_rhs(self): with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2): 
self.assertTrue(0.123457 == R(Decimal("0.123456"), 5)) def test_equal_yes_float_to_decimal_lhs(self): with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2): self.assertTrue(R(Decimal("0.123456"), 5) == 0.123457) def test_equal_no_float_to_decimal_rhs(self): with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2): self.assertFalse(0.123453 == R(Decimal("0.123456"), 5)) def test_equal_no_float_to_decimal_lhs(self): with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2): self.assertFalse(R(Decimal("0.123456"), 5) == 0.123453) def test_integer_float(self): with ShouldRaise(TypeError, unless=PY2): 1 == R(1.000001, 5) def test_float_integer(self): with ShouldRaise(TypeError, unless=PY2): R(1.000001, 5) == 1 def test_equal_yes_integer_other_rhs(self): self.assertTrue(10 == R(11, -1)) def test_equal_yes_integer_lhs(self): self.assertTrue(R(11, -1) == 10) def test_equal_no_integer_rhs(self): self.assertFalse(10 == R(16, -1)) def test_equal_no_integer_lhs(self): self.assertFalse(R(16, -1) == 10) def test_equal_integer_zero_precision(self): self.assertTrue(1 == R(1, 0)) def test_equal_yes_negative_precision(self): self.assertTrue(149.123 == R(101.123, -2)) def test_equal_no_negative_precision(self): self.assertFalse(149.123 == R(150.001, -2)) def test_decimal_yes_rhs(self): self.assertTrue(Decimal('0.123457') == R(Decimal('0.123456'), 5)) def test_decimal_yes_lhs(self): self.assertTrue(R(Decimal('0.123456'), 5) == Decimal('0.123457')) def test_decimal_no_rhs(self): self.assertFalse(Decimal('0.123453') == R(Decimal('0.123456'), 5)) def test_decimal_no_lhs(self): self.assertFalse(R(Decimal('0.123456'), 5) == Decimal('0.123453')) testfixtures-6.18.3/testfixtures/tests/test_sequencecomparison.py000066400000000000000000000301201412502526400256060ustar00rootroot00000000000000from testfixtures import SequenceComparison, generator, compare, Subset, Permutation class TestSequenceComparison(object): def test_repr(self): compare(repr(SequenceComparison(1, 2, 3)), expected='1, 2, 3') def 
test_repr_long(self): actual = repr(SequenceComparison('a', 'b', 'c'*1000))[:60] compare(actual, expected='\n' "\n'a',\n 'b'") def test_repr_after_equal(self): s = SequenceComparison(1, 2, 3) assert s == (1, 2, 3) compare(repr(s), expected='1, 2, 3') def test_equal_list(self): s = SequenceComparison(1, 2, 3) assert s == [1, 2, 3] def test_equal_tuple(self): s = SequenceComparison(1, 2, 3) assert s == (1, 2, 3) def test_equal_nested_unhashable_unordered(self): s = SequenceComparison({1}, {2}, {2}, ordered=False) assert s == ({2}, {1}, {2}) def test_equal_nested_unhashable_unordered_partial(self): s = SequenceComparison({1}, {2}, {2}, ordered=False, partial=True) assert s == ({2}, {1}, {2}, {3}) def test_equal_generator(self): s = SequenceComparison(1, 2, 3) assert s == generator(1, 2, 3) def test_equal_unordered(self): s = SequenceComparison(1, 2, 3, ordered=False) assert s == (1, 3, 2) def test_equal_partial_unordered(self): s = SequenceComparison(1, 2, ordered=False, partial=True) assert s == (2, 1, 4) def test_equal_partial_ordered(self): s = SequenceComparison(1, 2, 1, ordered=True, partial=True) assert s == (1, 1, 2, 1) def test_equal_ordered_duplicates(self): s = SequenceComparison(1, 2, 2, ordered=True, partial=True) assert s == (1, 2, 2, 3) def test_unequal_bad_type(self): s = SequenceComparison(1, 3) assert s != object() compare(repr(s), expected="bad type") def test_unequal_list(self): s = SequenceComparison(1, 2, 3) assert s != (1, 2, 4) compare(repr(s), expected=( '\n' '\n' 'same:\n' '[1, 2]\n\n' 'expected:\n' '[3]\n\n' 'actual:\n' '[4]\n' '' )) def test_unequal_same_but_all_wrong_order(self): s = SequenceComparison(1, 2, 3) assert s != (3, 1, 2) compare(repr(s), expected=( '\n' '\n' 'same:\n' '[]\n\n' 'expected:\n' '[1, 2, 3]\n\n' 'actual:\n' '[3, 1, 2]\n' '' )) def test_unequal_prefix_match_but_partial_false(self): s = SequenceComparison(1, 2, partial=False) assert s != (1, 2, 4) compare(repr(s), expected=( '\n' '\n' 'same:\n' '[1, 2]\n\n' 
'expected:\n' '[]\n\n' 'actual:\n' '[4]\n' '' )) def test_unequal_partial_ordered(self): s = SequenceComparison(1, 3, 5, ordered=True, partial=True, recursive=False) assert s != (1, 2, 3, 4, 0) compare(repr(s), expected=( '\n' '\n' 'ignored:\n' '[2, 4, 0]\n\n' 'same:\n' '[1, 3]\n\n' 'expected:\n' '[5]\n\n' 'actual:\n' '[]\n' '' )) def test_unequal_partial_ordered_recursive(self): s = SequenceComparison(1, 3, 5, ordered=True, partial=True, recursive=True) assert s != (1, 2, 3, 4, 0) compare(repr(s), expected=( '\n' '\n' 'ignored:\n' '[4, 0]\n\n' 'same:\n' '[1]\n\n' 'expected:\n' '[3, 5]\n\n' 'actual:\n' '[2, 3]\n' '' )) def test_unequal_partial_ordered_only_one_ignored_recursive(self): s = SequenceComparison(1, 2, ordered=True, partial=True, recursive=True) assert s != (2, 1, 4) compare(repr(s), expected=( '\n' '\n' 'ignored:\n' '[4]\n\n' 'same:\n' '[]\n\n' 'expected:\n' '[1, 2]\n\n' 'actual:\n' '[2, 1]\n' '' )) def test_unequal_full_ordered(self): s = SequenceComparison(1, 3, 5, ordered=True, partial=False) assert s != (0, 1, 2, 3, 4) compare(repr(s), expected=( '\n' '\n' 'same:\n' '[]\n\n' 'expected:\n' '[1, 3, 5]\n\n' 'actual:\n' '[0, 1, 2, 3, 4]\n' '' )) def test_unequal_partial_ordered_with_prefix(self): s = SequenceComparison('a', 'b', 1, 2, ordered=True, partial=True) assert s != ('a', 'b', 2, 1, 4) compare(repr(s), expected=( '\n' '\n' 'ignored:\n' '[2, 4]\n\n' 'same:\n' "['a', 'b', 1]\n\n" 'expected:\n' '[2]\n\n' 'actual:\n' '[]\n' '' )) def test_unequal_partial_unordered(self): s = SequenceComparison(1, 3, ordered=False, partial=True) assert s != (2, 1, 4) compare(repr(s), expected=( '\n' '\n' 'ignored:\n' '[2, 4]\n\n' 'same:\n' "[1]\n\n" 'in expected but not actual:\n' "[3]\n" '' )) def test_unequal_unordered_duplicates(self): s = SequenceComparison(2, 1, 2, ordered=False, partial=False) assert s != (1, 2) compare(repr(s), expected=( '\n' '\n' 'same:\n' "[2, 1]\n\n" 'in expected but not actual:\n' "[2]\n" '' )) def 
test_unequal_partial_unordered_duplicates(self): s = SequenceComparison(1, 2, 2, ordered=False, partial=True) assert s != (1, 2) compare(repr(s), expected=( '\n' '\n' 'same:\n' "[1, 2]\n\n" 'in expected but not actual:\n' "[2]\n" '' )) def test_unequal_partial_ordered_duplicates(self): s = SequenceComparison(1, 2, 2, partial=True) assert s != (1, 2) compare(repr(s), expected=( '\n' '\n' 'same:\n' "[1, 2]\n\n" 'expected:\n' '[2]\n\n' 'actual:\n' '[]\n' '' )) def test_unequal_generator(self): s = SequenceComparison(1, 3) assert s != generator(1, 2) compare(repr(s), expected=( '\n' '\n' 'same:\n' "[1]\n\n" 'expected:\n' '[3]\n\n' 'actual:\n' '[2]\n' '' )) def test_unequal_nested(self): s = SequenceComparison({1: 'a', 2: 'b'}, [1, 2], recursive=False) assert s != ({2: 'b', 3: 'c'}, [1, 3]) compare(repr(s), expected=( '\n' '\n' 'same:\n' "[]\n\n" 'expected:\n' "[{1: 'a', 2: 'b'}, [1, 2]]\n\n" 'actual:\n' "[{2: 'b', 3: 'c'}, [1, 3]]\n" '' )) def test_unequal_nested_recursive(self): s = SequenceComparison({1: 'a', 2: 'b'}, [1, 2], recursive=True) assert s != ({2: 'b', 3: 'c'}, [1, 3]) compare(repr(s), expected=( '\n' '\n' 'same:\n' "[]\n\n" 'expected:\n' "[{1: 'a', 2: 'b'}, [1, 2]]\n\n" 'actual:\n' "[{2: 'b', 3: 'c'}, [1, 3]]\n\n" "While comparing [0]: dict not as expected:\n\n" "same:\n" "[2]\n\n" "in expected but not actual:\n" "1: 'a'\n\n" "in actual but not expected:\n" "3: 'c'\n" '' )) def test_unequal_nested_unhashable_unordered(self): s = SequenceComparison({2: True}, {1: True}, {2: True}, {3: True}, ordered=False) assert s != ({1: True}, {2: True}, {4: True}) compare(repr(s), expected=( '\n' '\n' 'same:\n' "[{2: True}, {1: True}]\n\n" 'in expected but not actual:\n' "[{2: True}, {3: True}]\n\n" 'in actual but not expected:\n' "[{4: True}]\n" '' )) def test_unequal_nested_unhashable_unordered_partial(self): s = SequenceComparison({2: True}, {1: True}, {2: True}, {3: True}, ordered=False, partial=True) assert s != ({1: True}, {2: True}, {4: True}) compare(repr(s), 
expected=( '\n' '\n' 'ignored:\n' "[{4: True}]\n\n" 'same:\n' "[{2: True}, {1: True}]\n\n" 'in expected but not actual:\n' "[{2: True}, {3: True}]\n" '' )) def test_unequal_wrong_order(self): s = SequenceComparison(1, 2, 3) assert s != (1, 3, 2) compare(repr(s), expected=( '\n' '\n' 'same:\n' "[1]\n\n" 'expected:\n' '[2, 3]\n\n' 'actual:\n' '[3, 2]\n' '' )) def test_partial_nothing_specified(self): s = SequenceComparison(partial=True) assert s == {} def test_partial_wrong_type(self): s = SequenceComparison(partial=True) assert s != object() class TestSubset(object): def test_equal(self): assert Subset({1}, {2}) == [{1}, {2}, {3}] def test_unequal(self): assert Subset({1}, {2}) != [{1}] class TestPermutation(object): def test_equal(self): assert Permutation({1}, {2}) == [{2}, {1}] def test_unequal(self): assert Permutation({1}) != [{2}, {1}] testfixtures-6.18.3/testfixtures/tests/test_should_raise.py000066400000000000000000000241621412502526400243750ustar00rootroot00000000000000from textwrap import dedent from testfixtures import Comparison as C, ShouldRaise, should_raise from unittest import TestCase from ..compat import PY3, PY_36_PLUS, PY_37_PLUS, PY2 from ..shouldraise import ShouldAssert class TestShouldAssert(object): def test_no_exception(self): try: with ShouldAssert('foo'): pass except AssertionError as e: assert str(e) == "Expected AssertionError('foo'), None raised!" 
def test_wrong_exception(self): try: with ShouldAssert('foo'): raise KeyError() except KeyError: pass def test_wrong_text(self): try: with ShouldAssert('foo'): assert False, 'bar' except AssertionError as e: assert str(e) == dedent("""\ --- expected +++ actual @@ -1 +1,2 @@ -foo +bar +assert False""") class TestShouldRaise(TestCase): def test_no_params(self): def to_test(): raise ValueError('wrong value supplied') should_raise(ValueError('wrong value supplied'))(to_test)() def test_no_exception(self): def to_test(): pass with ShouldAssert('ValueError() (expected) != None (raised)'): should_raise(ValueError())(to_test)() def test_wrong_exception(self): def to_test(): raise ValueError('bar') if PY_37_PLUS: expected = "ValueError('foo') (expected) != ValueError('bar') (raised)" else: expected = "ValueError('foo',) (expected) != ValueError('bar',) (raised)" with ShouldAssert(expected): should_raise(ValueError('foo'))(to_test)() def test_only_exception_class(self): def to_test(): raise ValueError('bar') should_raise(ValueError)(to_test)() def test_wrong_exception_class(self): expected_exception = ValueError('bar') def to_test(): raise expected_exception try: should_raise(KeyError)(to_test)() except ValueError as actual_exception: assert actual_exception is expected_exception else: # pragma: no cover self.fail(('Wrong exception raised')) def test_wrong_exception_type(self): expected_exception = ValueError('bar') def to_test(): raise expected_exception try: should_raise(KeyError('foo'))(to_test)() except ValueError as actual_exception: assert actual_exception is expected_exception else: # pragma: no cover self.fail(('Wrong exception raised')) def test_no_supplied_or_raised(self): # effectvely we're saying "something should be raised!" 
# but we want to inspect s.raised rather than making # an up-front assertion def to_test(): pass with ShouldAssert("No exception raised!"): should_raise()(to_test)() def test_args(self): def to_test(*args): raise ValueError('%s' % repr(args)) should_raise(ValueError('(1,)'))(to_test)(1) def test_kw_to_args(self): def to_test(x): raise ValueError('%s' % x) should_raise(ValueError('1'))(to_test)(x=1) def test_kw(self): def to_test(**kw): raise ValueError('%r' % kw) should_raise(ValueError("{'x': 1}"))(to_test)(x=1) def test_both(self): def to_test(*args, **kw): raise ValueError('%r %r' % (args, kw)) should_raise(ValueError("(1,) {'x': 2}"))(to_test)(1, x=2) def test_method_args(self): class X: def to_test(self, *args): self.args = args raise ValueError() x = X() should_raise(ValueError)(x.to_test)(1, 2, 3) self.assertEqual(x.args, (1, 2, 3)) def test_method_kw(self): class X: def to_test(self, **kw): self.kw = kw raise ValueError() x = X() should_raise(ValueError)(x.to_test)(x=1, y=2) self.assertEqual(x.kw, {'x': 1, 'y': 2}) def test_method_both(self): class X: def to_test(self, *args, **kw): self.args = args self.kw = kw raise ValueError() x = X() should_raise(ValueError)(x.to_test)(1, y=2) self.assertEqual(x.args, (1, )) self.assertEqual(x.kw, {'y': 2}) def test_class_class(self): class Test: def __init__(self, x): # The TypeError is raised due to the mis-matched parameters # so the pass never gets executed pass # pragma: no cover should_raise(TypeError)(Test)() def test_raised(self): with ShouldRaise() as s: raise ValueError('wrong value supplied') self.assertEqual(s.raised, C(ValueError('wrong value supplied'))) def test_catch_baseexception_1(self): with ShouldRaise(SystemExit): raise SystemExit() def test_catch_baseexception_2(self): with ShouldRaise(KeyboardInterrupt): raise KeyboardInterrupt() def test_with_exception_class_supplied(self): with ShouldRaise(ValueError): raise ValueError('foo bar') def test_with_exception_supplied(self): with 
ShouldRaise(ValueError('foo bar')): raise ValueError('foo bar') def test_with_exception_supplied_wrong_args(self): if PY_37_PLUS: expected = "ValueError('foo') (expected) != ValueError('bar') (raised)" else: expected = "ValueError('foo',) (expected) != ValueError('bar',) (raised)" with ShouldAssert(expected): with ShouldRaise(ValueError('foo')): raise ValueError('bar') def test_neither_supplied(self): with ShouldRaise(): raise ValueError('foo bar') def test_with_no_exception_when_expected(self): if PY_37_PLUS: expected = "ValueError('foo') (expected) != None (raised)" else: expected = "ValueError('foo',) (expected) != None (raised)" with ShouldAssert(expected): with ShouldRaise(ValueError('foo')): pass def test_with_no_exception_when_expected_by_type(self): if PY2: expected = " (expected) != None (raised)" else: expected = " (expected) != None (raised)" with ShouldAssert(expected): with ShouldRaise(ValueError): pass def test_with_no_exception_when_neither_expected(self): with ShouldAssert("No exception raised!"): with ShouldRaise(): pass def test_with_getting_raised_exception(self): e = ValueError('foo bar') with ShouldRaise() as s: raise e assert e is s.raised def test_import_errors_1(self): if PY3: message = "No module named 'textfixtures'" else: message = 'No module named textfixtures.foo.bar' exception = ModuleNotFoundError if PY_36_PLUS else ImportError with ShouldRaise(exception(message)): import textfixtures.foo.bar def test_import_errors_2(self): with ShouldRaise(ImportError('X')): raise ImportError('X') def test_custom_exception(self): class FileTypeError(Exception): def __init__(self, value): self.value = value with ShouldRaise(FileTypeError('X')): raise FileTypeError('X') def test_decorator_usage(self): @should_raise(ValueError('bad')) def to_test(): raise ValueError('bad') to_test() def test_unless_false_okay(self): with ShouldRaise(unless=False): raise AttributeError() def test_unless_false_bad(self): with ShouldAssert("No exception raised!"): with 
ShouldRaise(unless=False): pass def test_unless_true_okay(self): with ShouldRaise(unless=True): pass def test_unless_true_not_okay(self): expected_exception = AttributeError('foo') try: with ShouldRaise(unless=True): raise expected_exception except AttributeError as actual_exception: assert actual_exception is expected_exception else: # pragma: no cover self.fail(('Wrong exception raised')) def test_unless_decorator_usage(self): @should_raise(unless=True) def to_test(): pass to_test() def test_identical_reprs(self): class AnnoyingException(Exception): def __init__(self, **kw): self.other = kw.get('other') with ShouldAssert( "AnnoyingException not as expected:\n\n" 'attributes same:\n' "['args']\n\n" "attributes differ:\n" "'other': 'bar' (expected) != 'baz' (raised)\n\n" "While comparing .other: 'bar' (expected) != 'baz' (raised)" ): with ShouldRaise(AnnoyingException(other='bar')): raise AnnoyingException(other='baz') def test_identical_reprs_but_args_different(self): if PY2: return class MessageError(Exception): def __init__(self, message, type=None): self.message = message self.type = type def __repr__(self): return 'MessageError({!r}, {!r})'.format(self.message, self.type) with ShouldAssert( "MessageError not as expected:\n\n" 'attributes same:\n' "['message', 'type']\n\n" "attributes differ:\n" "'args': ('foo',) (expected) != ('foo', None) (raised)\n\n" "While comparing .args: sequence not as expected:\n\n" "same:\n" "('foo',)\n\n" "expected:\n" "()\n\n" "raised:\n" "(None,)" ): with ShouldRaise(MessageError('foo')): raise MessageError('foo', None) testfixtures-6.18.3/testfixtures/tests/test_shouldwarn.py000066400000000000000000000114641412502526400241030ustar00rootroot00000000000000from unittest import TestCase import warnings from testfixtures import ( ShouldWarn, compare, ShouldRaise, ShouldNotWarn, Comparison as C ) from testfixtures.compat import PY3, PY_36_PLUS, PY_37_PLUS from testfixtures.shouldraise import ShouldAssert if PY3: warn_module = 'builtins' 
else: warn_module = 'exceptions' if PY_37_PLUS: comma = '' else: comma = ',' class ShouldWarnTests(TestCase): def test_warn_expected(self): with warnings.catch_warnings(record=True) as backstop: with ShouldWarn(UserWarning('foo')): warnings.warn('foo') compare(len(backstop), expected=0) def test_warn_not_expected(self): with ShouldAssert( "sequence not as expected:\n\n" "same:\n[]\n\n" "expected:\n[]\n\n" "actual:\n[UserWarning('foo'"+comma+")]" ): with warnings.catch_warnings(record=True) as backstop: with ShouldNotWarn(): warnings.warn('foo') compare(len(backstop), expected=0) def test_no_warn_expected(self): with ShouldNotWarn(): pass def test_no_warn_not_expected(self): with ShouldAssert( "sequence not as expected:\n\n" "same:\n[]\n\n" "expected:\n[args: ('foo',)]" "\n\nactual:\n[]" ): with ShouldWarn(UserWarning('foo')): pass def test_filters_removed(self): with warnings.catch_warnings(): warnings.simplefilter("ignore") with ShouldWarn(UserWarning("foo")): warnings.warn('foo') def test_multiple_warnings(self): with ShouldRaise(AssertionError) as s: with ShouldWarn(UserWarning('foo')): warnings.warn('foo') warnings.warn('bar') content = str(s.raised) self.assertTrue('foo' in content) self.assertTrue('bar' in content) def test_minimal_ok(self): with ShouldWarn(UserWarning): warnings.warn('foo') def test_minimal_bad(self): with ShouldAssert( "sequence not as expected:\n\n" "same:\n[]\n\n" "expected:\n" "[wrong type]\n\n" "actual:\n[UserWarning('foo'"+comma+")]" ): with ShouldWarn(DeprecationWarning): warnings.warn('foo') def test_maximal_ok(self): with ShouldWarn(DeprecationWarning('foo')): warnings.warn_explicit( 'foo', DeprecationWarning, 'bar.py', 42, 'bar_module' ) def test_maximal_bad(self): with ShouldAssert( "sequence not as expected:\n\n" "same:\n[]\n\n" "expected:\n[\n" "\n" "attributes differ:\n" "'args': ('bar',) (Comparison) != ('foo',) (actual)\n" "]\n\n" "actual:\n[DeprecationWarning('foo'"+comma+")]" ): with ShouldWarn(DeprecationWarning('bar')): 
warnings.warn_explicit( 'foo', DeprecationWarning, 'bar.py', 42, 'bar_module' ) def test_maximal_explore(self): with ShouldWarn() as recorded: warnings.warn_explicit( 'foo', DeprecationWarning, 'bar.py', 42, 'bar_module' ) compare(len(recorded), expected=1) expected_attrs = dict( _category_name='DeprecationWarning', category=DeprecationWarning, file=None, filename='bar.py', line=None, lineno=42, message=C(DeprecationWarning('foo')), ) if PY_36_PLUS: expected_attrs['source'] = None compare(expected=C(warnings.WarningMessage, **expected_attrs), actual=recorded[0]) def test_filter_present(self): with ShouldWarn(DeprecationWarning, message="This function is deprecated."): warnings.warn("This utility is deprecated.", DeprecationWarning) warnings.warn("This function is deprecated.", DeprecationWarning) def test_filter_missing(self): if PY3: type_repr = 'builtins.DeprecationWarning' else: type_repr = 'exceptions.DeprecationWarning' with ShouldAssert( "sequence not as expected:\n\n" "same:\n[]\n\n" "expected:\n[]\n\n" "actual:\n[]".format(type_repr) ): with ShouldWarn(DeprecationWarning, message="This function is deprecated."): warnings.warn("This utility is deprecated.", DeprecationWarning) testfixtures-6.18.3/testfixtures/tests/test_stringcomparison.py000066400000000000000000000031051412502526400253070ustar00rootroot00000000000000import re from testfixtures import StringComparison as S, compare from testfixtures.compat import PY2 from unittest import TestCase class Tests(TestCase): def test_equal_yes(self): self.failUnless('on 40220' == S('on \d+')) def test_equal_no(self): self.failIf('on xxx' == S('on \d+')) def test_not_equal_yes(self): self.failIf('on 40220' != S('on \d+')) def test_not_equal_no(self): self.failUnless('on xxx' != S('on \d+')) def test_comp_in_sequence(self): self.failUnless(( 1, 2, 'on 40220' ) == ( 1, 2, S('on \d+') )) def test_not_string(self): self.failIf(40220 == S('on \d+')) def test_repr(self): compare('', repr(S('on \d+'))) def test_str(self): 
compare('', str(S('on \d+'))) def test_sort(self): a = S('a') b = S('b') c = S('c') compare(sorted(('d', c, 'e', a, 'a1', b)), [a, 'a1', b, c, 'd', 'e']) if PY2: # cmp no longer exists in Python 3! def test_cmp_yes(self): self.failIf(cmp(S('on \d+'), 'on 4040')) def test_cmp_no(self): self.failUnless(cmp(S('on \d+'), 'on xx')) def test_flags_argument(self): compare(S(".*bar", re.DOTALL), actual="foo\nbar") def test_flags_parameter(self): compare(S(".*bar", flags=re.DOTALL), actual="foo\nbar") def test_flags_names(self): compare(S(".*BaR", dotall=True, ignorecase=True), actual="foo\nbar") testfixtures-6.18.3/testfixtures/tests/test_sybil.py000066400000000000000000000104301412502526400230270ustar00rootroot00000000000000from textwrap import dedent from unittest import TestCase from testfixtures.mock import Mock from sybil.document import Document from testfixtures import compare, Comparison as C, TempDirectory from testfixtures.sybil import FileParser, FileBlock class TestFileParser(TestCase): def check_document(self, text, expected): d = Document(dedent(text), path='/dev/null') compare( list(r.parsed for r in FileParser('td')(d)), expected=expected ) def test_multiple_files(self): self.check_document( text=""" .. topic:: file.txt :class: write-file line 1 line 2 line 3 .. topic:: file2.txt :class: read-file line 4 line 5 line 6 """, expected = [ C(FileBlock, path='file.txt', content="line 1\n\nline 2\nline 3\n", action='write'), C(FileBlock, path='file2.txt', content='line 4\n\nline 5\nline 6\n', action='read'), ]) def test_ignore_literal_blocking(self): self.check_document( text=""" .. topic:: file.txt :class: write-file :: line 1 line 2 line 3 """, expected=[ C(FileBlock, path='file.txt', content="line 1\n\nline 2\nline 3\n", action='write'), ]) def test_file_followed_by_text(self): self.check_document( text=""" .. topic:: file.txt :class: write-file .. code-block:: python print "hello" out = 'there' foo = 'bar' This is just some normal text! 
""", expected=[ C(FileBlock, path='file.txt', content='.. code-block:: python\n\nprint "hello"' '\nout = \'there\'\n\nfoo = \'bar\'\n', action='write'), ]) def test_red_herring(self): self.check_document( text=""" .. topic:: file.txt :class: not-a-file print "hello" out = 'there' """, expected=[] ) def test_no_class(self): self.check_document( text=""" .. topic:: file.txt print "hello" out = 'there' """, expected=[] ) def check_evaluate(self, dir, block, expected): parser = FileParser('td') compare(expected, actual=parser.evaluate(Mock( parsed=block, namespace={'td': dir}, path='/the/file', line=42, ))) def test_evaluate_read_same(self): with TempDirectory() as dir: dir.write('foo', b'content') self.check_evaluate( dir, FileBlock('foo', 'content', 'read'), expected=None ) def test_evaluate_read_difference(self): with TempDirectory() as dir: dir.write('foo', b'actual') self.check_evaluate( dir, FileBlock('foo', 'expected', 'read'), expected=( "--- File '/the/file', line 42:\n" "+++ Reading from \"{}/foo\":\n" "@@ -1 +1 @@\n" "-expected\n" "+actual" ).format(dir.path) ) def test_evaluate_write(self): with TempDirectory() as dir: self.check_evaluate( dir, FileBlock('foo', 'content', 'write'), expected=None ) dir.compare(['foo']) compare(dir.read('foo', 'ascii'), 'content') testfixtures-6.18.3/testfixtures/tests/test_tempdir.py000066400000000000000000000054761412502526400233670ustar00rootroot00000000000000import os from testfixtures.shouldraise import ShouldAssert from testfixtures.mock import Mock from tempfile import mkdtemp from testfixtures import Replacer, TempDirectory, compare, tempdir from unittest import TestCase from ..rmtree import rmtree class TestTempDir(TestCase): @tempdir() def test_simple(self, d): d.write('something', b'stuff') d.write('.svn', b'stuff') d.compare(( '.svn', 'something', )) @tempdir() def test_subdirs(self, d): subdir = ['some', 'thing'] d.write(subdir+['something'], b'stuff') d.write(subdir+['.svn'], b'stuff') d.compare(path=subdir, 
expected=( '.svn', 'something', )) @tempdir() def test_not_same(self, d): d.write('something', b'stuff') with ShouldAssert( "sequence not as expected:\n" "\n" "same:\n" "()\n" "\n" "expected:\n" "('.svn', 'something')\n" "\n" "actual:\n" "('something',)" ): d.compare(['.svn', 'something']) @tempdir(ignore=('.svn', )) def test_ignore(self, d): d.write('something', b'stuff') d.write('.svn', b'stuff') d.compare(['something']) def test_cleanup_properly(self): r = Replacer() try: m = Mock() d = mkdtemp() m.return_value = d r.replace('testfixtures.tempdirectory.mkdtemp', m) self.failUnless(os.path.exists(d)) self.assertFalse(m.called) @tempdir() def test_method(d): d.write('something', b'stuff') d.compare(['something']) self.assertFalse(m.called) compare(os.listdir(d), []) test_method() self.assertTrue(m.called) self.failIf(os.path.exists(d)) finally: r.restore() if os.path.exists(d): # only runs if the test fails! rmtree(d) # pragma: no cover @tempdir() def test_cleanup_test_okay_with_deleted_dir(self, d): rmtree(d.path) @tempdir() def test_decorator_returns_tempdirectory(self, d): # check for what we get, so we only have to write # tests in test_tempdirectory.py self.failUnless(isinstance(d, TempDirectory)) def test_dont_create_or_cleanup_with_path(self): with Replacer() as r: m = Mock() r.replace('testfixtures.tempdirectory.mkdtemp', m) r.replace('testfixtures.tempdirectory.rmtree', m) @tempdir(path='foo') def test_method(d): compare(d.path, 'foo') test_method() self.assertFalse(m.called) testfixtures-6.18.3/testfixtures/tests/test_tempdirectory.py000066400000000000000000000240651412502526400246100ustar00rootroot00000000000000import os from tempfile import mkdtemp from unittest import TestCase from warnings import catch_warnings from testfixtures.mock import Mock from testfixtures import ( TempDirectory, Replacer, ShouldRaise, compare, OutputCapture ) from ..compat import Unicode, PY3 from ..rmtree import rmtree if PY3: some_bytes = '\xa3'.encode('utf-8') some_text = 
'\xa3' else: some_bytes = '\xc2\xa3' some_text = '\xc2\xa3'.decode('utf-8') class TestTempDirectory(TestCase): def test_cleanup(self): d = TempDirectory() p = d.path assert os.path.exists(p) is True p = d.write('something', b'stuff') d.cleanup() assert os.path.exists(p) is False def test_cleanup_all(self): d1 = TempDirectory() d2 = TempDirectory() assert os.path.exists(d1.path) is True p1 = d1.path assert os.path.exists(d2.path) is True p2 = d2.path TempDirectory.cleanup_all() assert os.path.exists(p1) is False assert os.path.exists(p2) is False def test_with_statement(self): with TempDirectory() as d: p = d.path assert os.path.exists(p) is True d.write('something', b'stuff') assert os.listdir(p) == ['something'] with open(os.path.join(p, 'something')) as f: assert f.read() == 'stuff' assert os.path.exists(p) is False def test_listdir_sort(self): # pragma: no branch with TempDirectory() as d: d.write('ga', b'') d.write('foo1', b'') d.write('Foo2', b'') d.write('g.o', b'') with OutputCapture() as output: d.listdir() output.compare('Foo2\nfoo1\ng.o\nga') class TempDirectoryTests(TestCase): def test_write_with_slash_at_start(self): with TempDirectory() as d: with ShouldRaise(ValueError( 'Attempt to read or write outside the temporary Directory' )): d.write('/some/folder', 'stuff') def test_makedir_with_slash_at_start(self): with TempDirectory() as d: with ShouldRaise(ValueError( 'Attempt to read or write outside the temporary Directory' )): d.makedir('/some/folder') def test_read_with_slash_at_start(self): with TempDirectory() as d: with ShouldRaise(ValueError( 'Attempt to read or write outside the temporary Directory' )): d.read('/some/folder') def test_listdir_with_slash_at_start(self): with TempDirectory() as d: with ShouldRaise(ValueError( 'Attempt to read or write outside the temporary Directory' )): d.listdir('/some/folder') def test_compare_with_slash_at_start(self): with TempDirectory() as d: with ShouldRaise(ValueError( 'Attempt to read or write outside the 
temporary Directory' )): d.compare((), path='/some/folder') def test_read_with_slash_at_start_ok(self): with TempDirectory() as d: path = d.write('foo', b'bar') compare(d.read(path), b'bar') def test_dont_cleanup_with_path(self): d = mkdtemp() fp = os.path.join(d, 'test') with open(fp, 'w') as f: f.write('foo') try: td = TempDirectory(path=d) self.assertEqual(d, td.path) td.cleanup() # checks self.assertEqual(os.listdir(d), ['test']) with open(fp) as f: self.assertEqual(f.read(), 'foo') finally: rmtree(d) def test_dont_create_with_path(self): d = mkdtemp() rmtree(d) td = TempDirectory(path=d) self.assertEqual(d, td.path) self.failIf(os.path.exists(d)) def test_deprecated_check(self): with TempDirectory() as d: d.write('x', b'') d.check('x') def test_deprecated_check_dir(self): with TempDirectory() as d: d.write('foo/x', b'') d.check_dir('foo', 'x') def test_deprecated_check_all(self): with TempDirectory() as d: d.write('a/b/c', b'') d.check_all('', 'a/', 'a/b/', 'a/b/c') d.check_all('a', 'b/', 'b/c') def test_compare_sort_actual(self): with TempDirectory() as d: d.write('ga', b'') d.write('foo1', b'') d.write('Foo2', b'') d.write('g.o', b'') d.compare(['Foo2', 'foo1', 'g.o', 'ga']) def test_compare_sort_expected(self): with TempDirectory() as d: d.write('ga', b'') d.write('foo1', b'') d.write('Foo2', b'') d.write('g.o', b'') d.compare(['Foo2', 'ga', 'foo1', 'g.o']) def test_compare_path_tuple(self): with TempDirectory() as d: d.write('a/b/c', b'') d.compare(path=('a', 'b'), expected=['c']) def test_recursive_ignore(self): with TempDirectory(ignore=['.svn']) as d: d.write('.svn/rubbish', b'') d.write('a/.svn/rubbish', b'') d.write('a/b/.svn', b'') d.write('a/b/c', b'') d.write('a/d/.svn/rubbish', b'') d.compare([ 'a/', 'a/b/', 'a/b/c', 'a/d/', ]) def test_files_only(self): with TempDirectory() as d: d.write('a/b/c', b'') d.compare(['a/b/c'], files_only=True) def test_path(self): with TempDirectory() as d: expected1 = d.makedir('foo') expected2 = d.write('baz/bob', 
b'') expected3 = d.getpath('a/b/c') actual1 = d.getpath('foo') actual2 = d.getpath('baz/bob') actual3 = d.getpath(('a', 'b', 'c')) self.assertEqual(expected1, actual1) self.assertEqual(expected2, actual2) self.assertEqual(expected3, actual3) def test_atexit(self): # http://bugs.python.org/issue25532 from testfixtures.mock import call m = Mock() with Replacer() as r: # make sure the marker is false, other tests will # probably have set it r.replace('testfixtures.TempDirectory.atexit_setup', False) r.replace('atexit.register', m.register) d = TempDirectory() expected = [call.register(d.atexit)] compare(expected, m.mock_calls) with catch_warnings(record=True) as w: d.atexit() self.assertTrue(len(w), 1) compare(str(w[0].message), ( # pragma: no branch "TempDirectory instances not cleaned up by shutdown:\n" + d.path )) d.cleanup() compare(set(), TempDirectory.instances) # check re-running has no ill effects d.atexit() def test_read_decode(self): with TempDirectory() as d: with open(os.path.join(d.path, 'test.file'), 'wb') as f: f.write(b'\xc2\xa3') compare(d.read('test.file', 'utf8'), some_text) def test_read_no_decode(self): with TempDirectory() as d: with open(os.path.join(d.path, 'test.file'), 'wb') as f: f.write(b'\xc2\xa3') compare(d.read('test.file'), b'\xc2\xa3') def test_write_bytes(self): with TempDirectory() as d: d.write('test.file', b'\xc2\xa3') with open(os.path.join(d.path, 'test.file'), 'rb') as f: compare(f.read(), b'\xc2\xa3') def test_write_unicode(self): with TempDirectory() as d: d.write('test.file', some_text, 'utf8') with open(os.path.join(d.path, 'test.file'), 'rb') as f: compare(f.read(), b'\xc2\xa3') def test_write_unicode_bad(self): if PY3: expected = TypeError( "a bytes-like object is required, not 'str'" ) else: expected = UnicodeDecodeError( 'ascii', '\xa3', 0, 1, 'ordinal not in range(128)' ) with TempDirectory() as d: with ShouldRaise(expected): d.write('test.file', Unicode('\xa3')) def test_just_empty_non_recursive(self): with 
TempDirectory() as d: d.makedir('foo/bar') d.makedir('foo/baz') d.compare(path='foo', expected=['bar', 'baz'], recursive=False) def test_just_empty_dirs(self): with TempDirectory() as d: d.makedir('foo/bar') d.makedir('foo/baz') d.compare(['foo/', 'foo/bar/', 'foo/baz/']) def test_symlink(self): with TempDirectory() as d: d.write('foo/bar.txt', b'x') os.symlink(d.getpath('foo'), d.getpath('baz')) d.compare(['baz/', 'foo/', 'foo/bar.txt']) def test_follow_symlinks(self): with TempDirectory() as d: d.write('foo/bar.txt', b'x') os.symlink(d.getpath('foo'), d.getpath('baz')) d.compare(['baz/', 'baz/bar.txt', 'foo/', 'foo/bar.txt'], followlinks=True) def test_trailing_slash(self): with TempDirectory() as d: d.write('source/foo/bar.txt', b'x') d.compare(path='source/', expected=['foo/', 'foo/bar.txt']) def test_default_encoding(self): encoded = b'\xc2\xa3' decoded = encoded.decode('utf-8') with TempDirectory(encoding='utf-8') as d: d.write('test.txt', decoded) compare(d.read('test.txt'), expected=decoded) def test_override_default_encoding(self): encoded = b'\xc2\xa3' decoded = encoded.decode('utf-8') with TempDirectory(encoding='ascii') as d: d.write('test.txt', decoded, encoding='utf-8') compare(d.read('test.txt', encoding='utf-8'), expected=decoded) testfixtures-6.18.3/testfixtures/tests/test_time.py000066400000000000000000000160031412502526400226450ustar00rootroot00000000000000from datetime import timedelta from unittest import TestCase from testfixtures import test_time, replace, compare, ShouldRaise from .test_datetime import SampleTZInfo class TestTime(TestCase): @replace('time.time', test_time()) def test_time_call(self): from time import time compare(time(), 978307200.0) compare(time(), 978307201.0) compare(time(), 978307203.0) @replace('time.time', test_time(2002, 1, 1, 1, 2, 3)) def test_time_supplied(self): from time import time compare(time(), 1009846923.0) @replace('time.time', test_time(None)) def test_time_sequence(self, t): t.add(2002, 1, 1, 1, 0, 0) 
t.add(2002, 1, 1, 2, 0, 0) t.add(2002, 1, 1, 3, 0, 0) from time import time compare(time(), 1009846800.0) compare(time(), 1009850400.0) compare(time(), 1009854000.0) @replace('time.time', test_time(None)) def test_add_datetime_supplied(self, t): from datetime import datetime from time import time t.add(datetime(2002, 1, 1, 2)) compare(time(), 1009850400.0) tzinfo = SampleTZInfo() tzrepr = repr(tzinfo) with ShouldRaise(ValueError( 'Cannot add datetime with tzinfo of %s as configured to use None' %( tzrepr ))): t.add(datetime(2001, 1, 1, tzinfo=tzinfo)) def test_instantiate_with_datetime(self): from datetime import datetime t = test_time(datetime(2002, 1, 1, 2)) compare(t(), 1009850400.0) @replace('time.time', test_time(None)) def test_now_requested_longer_than_supplied(self, t): t.add(2002, 1, 1, 1, 0, 0) t.add(2002, 1, 1, 2, 0, 0) from time import time compare(time(), 1009846800.0) compare(time(), 1009850400.0) compare(time(), 1009850401.0) compare(time(), 1009850403.0) @replace('time.time', test_time()) def test_call(self, t): compare(t(), 978307200.0) from time import time compare(time(), 978307201.0) @replace('time.time', test_time()) def test_repr_time(self): from time import time compare(repr(time), "") @replace('time.time', test_time(delta=10)) def test_delta(self): from time import time compare(time(), 978307200.0) compare(time(), 978307210.0) compare(time(), 978307220.0) @replace('time.time', test_time(delta_type='minutes')) def test_delta_type(self): from time import time compare(time(), 978307200.0) compare(time(), 978307260.0) compare(time(), 978307380.0) @replace('time.time', test_time(None)) def test_set(self): from time import time time.set(2001, 1, 1, 1, 0, 1) compare(time(), 978310801.0) time.set(2002, 1, 1, 1, 0, 0) compare(time(), 1009846800.0) compare(time(), 1009846802.0) @replace('time.time', test_time(None)) def test_set_datetime_supplied(self, t): from datetime import datetime from time import time t.set(datetime(2001, 1, 1, 1, 0, 1)) 
compare(time(), 978310801.0) tzinfo = SampleTZInfo() tzrepr = repr(tzinfo) with ShouldRaise(ValueError( 'Cannot add datetime with tzinfo of %s as configured to use None' %( tzrepr ))): t.set(datetime(2001, 1, 1, tzinfo=tzinfo)) @replace('time.time', test_time(None)) def test_set_kw(self): from time import time time.set(year=2001, month=1, day=1, hour=1, second=1) compare(time(), 978310801.0) @replace('time.time', test_time(None)) def test_set_kw_tzinfo(self): from time import time with ShouldRaise(TypeError('Cannot add using tzinfo on ttime')): time.set(year=2001, tzinfo=SampleTZInfo()) @replace('time.time', test_time(None)) def test_set_args_tzinfo(self): from time import time with ShouldRaise(TypeError('Cannot add using tzinfo on ttime')): time.set(2002, 1, 2, 3, 4, 5, 6, SampleTZInfo()) @replace('time.time', test_time(None)) def test_add_kw(self): from time import time time.add(year=2001, month=1, day=1, hour=1, second=1) compare(time(), 978310801.0) @replace('time.time', test_time(None)) def test_add_tzinfo_kw(self): from time import time with ShouldRaise(TypeError('Cannot add using tzinfo on ttime')): time.add(year=2001, tzinfo=SampleTZInfo()) @replace('time.time', test_time(None)) def test_add_tzinfo_args(self): from time import time with ShouldRaise(TypeError('Cannot add using tzinfo on ttime')): time.add(2001, 1, 2, 3, 4, 5, 6, SampleTZInfo()) @replace('time.time', test_time(2001, 1, 2, 3, 4, 5, 600000)) def test_max_number_args(self): from time import time compare(time(), 978404645.6) def test_max_number_tzinfo(self): with ShouldRaise(TypeError( "You don't want to use tzinfo with test_time" )): test_time(2001, 1, 2, 3, 4, 5, 6, SampleTZInfo()) @replace('time.time', test_time(2001, 1, 2)) def test_min_number_args(self): from time import time compare(time(), 978393600.0) @replace('time.time', test_time( year=2001, month=1, day=2, hour=3, minute=4, second=5, microsecond=6, )) def test_all_kw(self): from time import time compare(time(), 978404645.000006) def 
test_kw_tzinfo(self): with ShouldRaise(TypeError( "You don't want to use tzinfo with test_time" )): test_time(year=2001, tzinfo=SampleTZInfo()) def test_instance_tzinfo(self): from datetime import datetime with ShouldRaise(TypeError( "You don't want to use tzinfo with test_time" )): test_time(datetime(2001, 1, 1, tzinfo=SampleTZInfo())) def test_subsecond_deltas(self): time = test_time(delta=0.5) compare(time(), 978307200.0) compare(time(), 978307200.5) compare(time(), 978307201.0) def test_ms_deltas(self): time = test_time(delta=1000, delta_type='microseconds') compare(time(), 978307200.0) compare(time(), 978307200.001) compare(time(), 978307200.002) def test_tick_when_static(self): time = test_time(delta=0) compare(time(), expected=978307200.0) time.tick(seconds=1) compare(time(), expected=978307201.0) def test_tick_when_dynamic(self): # hopefully not that common? time = test_time() compare(time(), expected=978307200.0) time.tick(seconds=1) compare(time(), expected=978307202.0) def test_tick_with_timedelta_instance(self): time = test_time(delta=0) compare(time(), expected=978307200.0) time.tick(timedelta(seconds=1)) compare(time(), expected=978307201.0) testfixtures-6.18.3/testfixtures/tests/test_twisted.py000066400000000000000000000132061412502526400233740ustar00rootroot00000000000000from twisted.logger import Logger, formatEvent from twisted.python.failure import Failure from twisted.trial.unittest import TestCase from testfixtures import compare, ShouldRaise, StringComparison as S, ShouldAssert from testfixtures.compat import PY3 from testfixtures.twisted import LogCapture, INFO log = Logger() class TestLogCapture(TestCase): def test_simple(self): capture = LogCapture.make(self) log.info('er, {greeting}', greeting='hi') capture.check((INFO, 'er, hi')) def test_captured(self): capture = LogCapture.make(self) log.info('er, {greeting}', greeting='hi') assert len(capture.events) == 1 compare(capture.events[0]['log_namespace'], 
expected='testfixtures.tests.test_twisted') def test_fields(self): capture = LogCapture.make(self, fields=('a', 'b')) log.info('{a}, {b}', a=1, b=2) log.info('{a}, {b}', a=3, b=4) capture.check( [1, 2], [3, 4], ) def test_field(self): capture = LogCapture.make(self, fields=(formatEvent,)) log.info('er, {greeting}', greeting='hi') capture.check('er, hi') def test_check_failure_test_minimal(self): capture = LogCapture.make(self) try: raise Exception('all gone wrong') except: log.failure('oh dear') capture.check_failure_text('all gone wrong') self.flushLoggedErrors() def test_check_failure_test_maximal(self): capture = LogCapture.make(self) try: raise TypeError('all gone wrong') except: log.failure('oh dear') log.info("don't look at me...") capture.check_failure_text(str(TypeError), index=0, attribute='type') self.flushLoggedErrors() self.flushLoggedErrors() def test_raise_logged_failure(self): capture = LogCapture.make(self) try: raise TypeError('all gone wrong') except: log.failure('oh dear') with ShouldRaise(Failure) as s: capture.raise_logged_failure() compare(s.raised.value, expected=TypeError('all gone wrong')) self.flushLoggedErrors() def test_raise_later_logged_failure(self): capture = LogCapture.make(self) try: raise ValueError('boom!') except: log.failure('oh dear') try: raise TypeError('all gone wrong') except: log.failure('what now?!') with ShouldRaise(Failure) as s: capture.raise_logged_failure(start_index=1) compare(s.raised.value, expected=TypeError('all gone wrong')) self.flushLoggedErrors() def test_order_doesnt_matter_ok(self): capture = LogCapture.make(self) log.info('Failed to send BAR') log.info('Sent FOO, length 1234') log.info('Sent 1 Messages') capture.check( (INFO, S('Sent FOO, length \d+')), (INFO, 'Failed to send BAR'), (INFO, 'Sent 1 Messages'), order_matters=False ) def test_order_doesnt_matter_failure(self): capture = LogCapture.make(self) log.info('Failed to send BAR') log.info('Sent FOO, length 1234') log.info('Sent 1 Messages') with 
ShouldAssert( "entries not as expected:\n" "\n" "expected and found:\n" "[(, 'Failed to send BAR'), (, 'Sent 1 Messages')]\n" "\n" "expected but not found:\n" "[(, )]\n" "\n" "other entries:\n" "[(, {}'Sent FOO, length 1234')]".format('' if PY3 else 'u') ): capture.check( (INFO, S('Sent FOO, length abc')), (INFO, 'Failed to send BAR'), (INFO, 'Sent 1 Messages'), order_matters=False ) def test_order_doesnt_matter_extra_in_expected(self): capture = LogCapture.make(self) log.info('Failed to send BAR') log.info('Sent FOO, length 1234') with ShouldAssert( "entries not as expected:\n" "\n" "expected and found:\n" "[(, 'Failed to send BAR'),\n" " (, )]\n" "\n" "expected but not found:\n" "[(, 'Sent 1 Messages')]\n" "\n" "other entries:\n" "[]" ): capture.check( (INFO, S('Sent FOO, length 1234')), (INFO, 'Failed to send BAR'), (INFO, 'Sent 1 Messages'), order_matters=False ) def test_order_doesnt_matter_extra_in_actual(self): capture = LogCapture.make(self) log.info('Failed to send BAR') log.info('Sent FOO, length 1234') log.info('Sent 1 Messages') with ShouldAssert( "entries not as expected:\n" "\n" "expected and found:\n" "[(, 'Failed to send BAR'), (, 'Sent 1 Messages')]\n" "\n" "expected but not found:\n" "[(, )]\n" "\n" "other entries:\n" "[(, {}'Sent FOO, length 1234')]".format('' if PY3 else 'u') ): capture.check( (INFO, S('Sent FOO, length abc')), (INFO, 'Failed to send BAR'), (INFO, 'Sent 1 Messages'), order_matters=False ) testfixtures-6.18.3/testfixtures/tests/test_wrap.py000066400000000000000000000146601412502526400226670ustar00rootroot00000000000000from unittest import TestCase from testfixtures.mock import Mock, MagicMock, patch, DEFAULT from testfixtures import wrap, compare, log_capture, LogCapture class TestWrap(TestCase): def test_wrapping(self): m = Mock() @wrap(m.before, m.after) def test_function(r): m.test() return 'something' compare(m.method_calls, []) compare(test_function(), 'something') compare(m.method_calls, [ ('before', (), {}), ('test', (), 
{}), ('after', (), {}) ]) def test_wrapping_only_before(self): before = Mock() @wrap(before) def test_function(): return 'something' self.assertFalse(before.called) compare(test_function(), 'something') compare(before.call_count, 1) def test_wrapping_wants_return(self): m = Mock() m.before.return_value = 'something' @wrap(m.before, m.after) def test_function(r): m.test(r) return 'r:'+r compare(m.method_calls, []) compare(test_function(), 'r:something') compare(m.method_calls, [ ('before', (), {}), ('test', ('something', ), {}), ('after', (), {}) ]) def test_wrapping_wants_arguments(self): # This only works in python 2.5+, for # earlier versions, you'll have to come # up with your own `partial` class... from functools import partial m = Mock() @wrap(partial(m.before, 1, x=2), partial(m.after, 3, y=4)) def test_function(r): m.test() return 'something' compare(m.method_calls, []) compare(test_function(), 'something') compare(m.method_calls, [ ('before', (1, ), {'x': 2}), ('test', (), {}), ('after', (3, ), {'y': 4}) ]) def test_multiple_wrappers(self): m = Mock() @wrap(m.before2, m.after2) @wrap(m.before1, m.after1) def test_function(): m.test_function() return 'something' compare(m.method_calls, []) compare(test_function(), 'something') compare(m.method_calls, [ ('before1', (), {}), ('before2', (), {}), ('test_function', (), {}), ('after2', (), {}), ('after1', (), {}), ]) def test_multiple_wrappers_wants_return(self): m = Mock() m.before1.return_value = 1 m.before2.return_value = 2 @wrap(m.before2, m.after2) @wrap(m.before1, m.after1) def test_function(r1, r2): m.test_function(r1, r2) return 'something' compare(m.method_calls, []) compare(test_function(), 'something') compare(m.method_calls, [ ('before1', (), {}), ('before2', (), {}), ('test_function', (1, 2), {}), ('after2', (), {}), ('after1', (), {}), ]) def test_multiple_wrappers_only_want_first_return(self): m = Mock() m.before1.return_value = 1 @wrap(m.before2, m.after2) @wrap(m.before1, m.after1) def 
test_function(r1): m.test_function(r1) return 'something' compare(m.method_calls, []) compare(test_function(), 'something') compare(m.method_calls, [ ('before1', (), {}), ('before2', (), {}), ('test_function', (1, ), {}), ('after2', (), {}), ('after1', (), {}), ]) def test_wrap_method(self): m = Mock() class T: @wrap(m.before, m.after) def method(self): m.method() T().method() compare(m.method_calls, [ ('before', (), {}), ('method', (), {}), ('after', (), {}) ]) def test_wrap_method_wants_return(self): m = Mock() m.before.return_value = 'return' class T: @wrap(m.before, m.after) def method(self, r): m.method(r) T().method() compare(m.method_calls, [ ('before', (), {}), ('method', ('return', ), {}), ('after', (), {}) ]) def test_wrapping_different_functions(self): m = Mock() @wrap(m.before1, m.after1) def test_function1(): m.something1() return 'something1' @wrap(m.before2, m.after2) def test_function2(): m.something2() return 'something2' compare(m.method_calls, []) compare(test_function1(), 'something1') compare(m.method_calls, [ ('before1', (), {}), ('something1', (), {}), ('after1', (), {}) ]) compare(test_function2(), 'something2') compare(m.method_calls, [ ('before1', (), {}), ('something1', (), {}), ('after1', (), {}), ('before2', (), {}), ('something2', (), {}), ('after2', (), {}) ]) def test_wrapping_local_vars(self): m = Mock() @wrap(m.before, m.after) def test_function(): something = 1 m.test() return 'something' compare(m.method_calls, []) compare(test_function(), 'something') compare(m.method_calls, [ ('before', (), {}), ('test', (), {}), ('after', (), {}) ]) def test_wrapping__name__(self): m = Mock() @wrap(m.before, m.after) def test_function(): pass # pragma: no cover compare(test_function.__name__, 'test_function') def test_our_wrap_dealing_with_mock_patch(self): @patch.multiple('testfixtures.tests.sample1', X=DEFAULT) @log_capture() def patched(log, X): from testfixtures.tests.sample1 import X as imported_X assert isinstance(log, LogCapture) assert 
isinstance(X, MagicMock) assert imported_X is X patched() def test_patch_with_dict(self): @patch('testfixtures.tests.sample1.X', {'x': 1}) @log_capture() def patched(log): assert isinstance(log, LogCapture) from testfixtures.tests.sample1 import X assert X == {'x': 1} patched() testfixtures-6.18.3/testfixtures/twisted.py000066400000000000000000000113031412502526400211670ustar00rootroot00000000000000""" Tools for helping to test Twisted applications. """ from __future__ import absolute_import from pprint import pformat from . import compare from twisted.logger import globalLogPublisher, formatEvent, LogLevel class LogCapture(object): """ A helper for capturing stuff logged using Twisted's loggers. :param fields: A sequence of field names that :meth:`~LogCapture.check` will use to build "actual" events to compare against the expected events passed in. If items are strings, they will be treated as keys info the Twisted logging event. If they are callable, they will be called with the event as their only parameter. If only one field is specified, "actual" events will just be that one field; otherwise they will be a tuple of the specified fields. """ def __init__(self, fields=('log_level', formatEvent,)): #: The list of events captured. self.events = [] self.fields = fields def __call__(self, event): self.events.append(event) def install(self): "Start capturing." self.original_observers = globalLogPublisher._observers globalLogPublisher._observers = [self] def uninstall(self): "Stop capturing." globalLogPublisher._observers = self.original_observers def check(self, *expected, **kw): """ Check captured events against those supplied. Please see the ``fields`` parameter to the constructor to see how "actual" events are built. :param order_matters: This defaults to ``True``. If ``False``, the order of expected logging versus actual logging will be ignored. 
""" order_matters = kw.pop('order_matters', True) assert not kw, 'order_matters is the only keyword parameter' actual = [] for event in self.events: actual_event = tuple(field(event) if callable(field) else event.get(field) for field in self.fields) if len(actual_event) == 1: actual_event = actual_event[0] actual.append(actual_event) if order_matters: compare(expected=expected, actual=actual) else: expected = list(expected) matched = [] unmatched = [] for entry in actual: try: index = expected.index(entry) except ValueError: unmatched.append(entry) else: matched.append(expected.pop(index)) if expected: raise AssertionError(( 'entries not as expected:\n\n' 'expected and found:\n%s\n\n' 'expected but not found:\n%s\n\n' 'other entries:\n%s' ) % (pformat(matched), pformat(expected), pformat(unmatched))) def check_failure_text(self, expected, index=-1, attribute='value'): """ Check the string representation of an attribute of a logged :class:`Failure` is as expected. :param expected: The expected string representation. :param index: The index into :attr:`events` where the failure should have been logged. :param attribute: The attribute of the failure of which to find the string representation. """ compare(expected, actual=str(getattr(self.events[index]['log_failure'], attribute))) def raise_logged_failure(self, start_index=0): """ A debugging tool that raises the first failure encountered in captured logging. :param start_index: The index into :attr:`events` from where to start looking for failures. """ for event in self.events[start_index:]: failure = event.get('log_failure') if failure: raise failure @classmethod def make(cls, testcase, **kw): """ Instantiate, install and add a cleanup for a :class:`LogCapture`. :param testcase: This must be an instance of :class:`twisted.trial.unittest.TestCase`. :param kw: Any other parameters are passed directly to the :class:`LogCapture` constructor. :return: The :class:`LogCapture` instantiated by this method. 
""" capture = cls(**kw) capture.install() testcase.addCleanup(capture.uninstall) return capture #: Short reference to Twisted's ``LogLevel.debug`` DEBUG = LogLevel.debug #: Short reference to Twisted's ``LogLevel.info`` INFO = LogLevel.info #: Short reference to Twisted's ``LogLevel.warn`` WARN = LogLevel.warn #: Short reference to Twisted's ``LogLevel.error`` ERROR = LogLevel.error #: Short reference to Twisted's ``LogLevel.critical`` CRITICAL = LogLevel.critical testfixtures-6.18.3/testfixtures/utils.py000066400000000000000000000054631412502526400206560ustar00rootroot00000000000000import sys from functools import wraps from textwrap import dedent try: from inspect import getfullargspec as getargspec except ImportError: from inspect import getargspec from . import singleton from .compat import ClassType DEFAULT = singleton('DEFAULT') defaults = [DEFAULT] try: from .mock import DEFAULT except ImportError: # pragma: no cover pass else: defaults.append(DEFAULT) def generator(*args): """ A utility function for creating a generator that will yield the supplied arguments. """ for i in args: yield i class Wrapping: attribute_name = None new = DEFAULT def __init__(self, before, after): self.before, self.after = before, after def __enter__(self): return self.before() def __exit__(self, exc_type=None, exc_val=None, exc_tb=None): if self.after is not None: self.after() def wrap(before, after=None): """ A decorator that causes the supplied callables to be called before or after the wrapped callable, as appropriate. 
""" wrapping = Wrapping(before, after) def wrapper(func): if hasattr(func, 'patchings'): func.patchings.append(wrapping) return func @wraps(func) def patched(*args, **keywargs): extra_args = [] entered_patchers = [] to_add = len(getargspec(func).args[len(args):]) added = 0 exc_info = (None, None, None) try: for patching in patched.patchings: arg = patching.__enter__() entered_patchers.append(patching) if patching.attribute_name is not None: keywargs.update(arg) elif patching.new in defaults and added < to_add: extra_args.append(arg) added += 1 args += tuple(extra_args) return func(*args, **keywargs) except: # Pass the exception to __exit__ exc_info = sys.exc_info() # re-raise the exception raise finally: for patching in reversed(entered_patchers): patching.__exit__(*exc_info) patched.patchings = [wrapping] return patched return wrapper def extend_docstring(docstring, objs): for obj in objs: try: obj.__doc__ = dedent(obj.__doc__) + docstring except (AttributeError, TypeError): # python 2 or pypy 4.0.1 :-( pass def indent(text, indent_size = 2): indented = [] for do_indent, line in enumerate(text.splitlines(True)): if do_indent: line = ' '*indent_size + line indented.append(line) return ''.join(indented) testfixtures-6.18.3/testfixtures/version.txt000066400000000000000000000000071412502526400213570ustar00rootroot000000000000006.18.3