pydash-8.0.3/.github/workflows/main.yml

name: Main

on: [push, pull_request]

jobs:
  test:
    name: Test
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          pip install --upgrade pip setuptools
          pip install --upgrade tox-gh-actions coveralls
      - name: Run tests
        run: |
          tox
      - name: Send coverage report
        run: |
          coveralls
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COVERALLS_FLAG_NAME: ${{ matrix.python-version }}
          COVERALLS_PARALLEL: true
          COVERALLS_SERVICE_NAME: github

  coveralls:
    name: Finish Coveralls
    needs: test
    runs-on: ubuntu-latest
    container: python:3-slim
    steps:
      - name: Finished
        run: |
          pip install --upgrade coveralls
          coveralls --finish
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

pydash-8.0.3/.gitignore

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ cover/ junit.xml # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ # PyBuilder .pybuilder/ target/ # IPython profile_default/ ipython_config.py # pyenv .python-version # PEP 582 __pypackages__/ # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # pytype static type analyzer .pytype/ # Mr Developer .mr.developer.cfg .project .pydevproject .idea .DS_Store # NodeJS node_modules pydash-8.0.3/.readthedocs.yaml000066400000000000000000000011161464745015500162530ustar00rootroot00000000000000# Read the Docs configuration file for Sphinx projects # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the OS, Python version and other tools you might need build: os: ubuntu-22.04 tools: python: "3.11" # Build documentation in the "docs/" directory with Sphinx sphinx: configuration: docs/conf.py fail_on_warning: true # Declare the Python requirements required to build documentation # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html python: install: - requirements: requirements.txt pydash-8.0.3/AUTHORS.rst000066400000000000000000000025641464745015500147130ustar00rootroot00000000000000Authors ======= Lead ---- - Derrick Gilland, dgilland@gmail.com, `dgilland@github `_ Contributors ------------ - Nathan Cahill, nathan@nathancahill.com, `nathancahill@github `_ - Klaus Sevensleeper, k7sleeper@gmail.com, `k7sleeper@github `_ - Bharadwaj Yarlagadda, yarlagaddabharadwaj@gmail.com, `bharadwajyarlagadda@github `_ - Michael James, `urbnjamesmi1@github `_ - Tim Griesser, tgriesser@gmail.com, `tgriesser@github `_ - Shaun Patterson, `shaunpatterson@github `_ - Brian Beck, `beck3905@github `_ - Frank Epperlein, `efenka@github `_ - Joshua Wilson, `jwilson8767@github `_ - Eli Jose, `elijose55@github `_ - Gonzalo Naveira, `gonzalonaveira@github `_ - Wenbo Zhao, zhaowb@gmail.com, `zhaowb@github `_ - Mervyn Lee, `mervynlee94@github `_ - Weineel Lee, `weineel@github `_ - bl4ckst0ne@github `bl4ckst0ne@github `_ - Thomas `DeviousStoat@github `_ pydash-8.0.3/CHANGELOG.rst000066400000000000000000001237311464745015500150550ustar00rootroot00000000000000.. _changelog: Changelog ========= v8.0.3 (2024-07-22) ------------------- - Specify explicit type parameters for all type annotations. Thanks DeviousStoat_! v8.0.2 (2024-07-21) ------------------- - Fix typing issue in ``map_`` for compatability with ``chain``. Thanks DeviousStoat_! v8.0.1 (2024-04-26) ------------------- - Fix issue where too many arguments were passed to stdlib's ``operator.attrgetter``, ``operator.itemgetter``, and ``operator.methodcaller`` when instances of those classes were used as callbacks to functions like ``map_``, ``filter_``, etc. due to a bug introduced in Python 3.12.3 and 3.11.9 that reported an incorrect signature for those ``operator`` class instances. 
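The snippet below is a minimal sketch (the sample data is made up, not from the changelog) of the kind of call the v8.0.1 fix covers: an ``operator.itemgetter`` instance passed directly as the iteratee to ``map_``.

.. code-block:: python

    import operator

    import pydash

    users = [{"name": "Michelangelo"}, {"name": "Donatello"}]

    # An operator.itemgetter instance used directly as the iteratee. pydash
    # inspects the callable to decide how many arguments to forward, which is
    # what broke on Python 3.11.9/3.12.3 before the v8.0.1 fix.
    names = pydash.map_(users, operator.itemgetter("name"))
    assert names == ["Michelangelo", "Donatello"]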
v8.0.0 (2024-03-26) ------------------- - Add functions (Thanks DeviousStoat_!): - ``apply`` (previously named ``thru``) - ``apply_catch`` - ``apply_if`` - ``apply_if_not_none`` - ``eq_cmp`` - ``gt_cmp`` - ``gte_cmp`` - ``in_range_cmp`` - ``is_equal_cmp`` - ``is_equal_with_cmp`` - ``is_instance_of_cmp`` - ``is_match_cmp`` - ``is_match_with_cmp`` - ``is_monotone_cmp`` - ``lt_cmp`` - ``lte_cmp`` - Rename function ``thru`` to ``apply``. Thanks DeviousStoat_! (**breaking change**) - Changed ``zip_``, ``unzip``, ``zip_with``, ``unzip_with`` and ``to_pairs`` to accept iterables of tuples instead of lists and return lists of tuples instead of lists of lists. Thanks DeviousStoat_! (**breaking change**) - Fixed bug in ``divide``, ``multiply``, and ``subtract`` that returned the wrong result when ``0`` was used as one of the operation values. Thanks DeviousStoat_! v7.0.7 (2024-01-27) ------------------- - Fix bug in function wrappers that incorrectly computed the number of arguments the wrapped function could handle. Thanks DeviousStoat_! - Fix bug in ``set_`` where the incorrect object type, list instead of dict, was initialized on class attributes. Thanks DeviousStoat_! - Drop support for Python 3.7. v7.0.6 (2023-07-29) ------------------- - Fix typing for chaining interface for methods that use varargs. Thanks DeviousStoat_! v7.0.5 (2023-07-06) ------------------- - Fix typing for ``find_index`` and ``find_last_index`` by allowing ``predicate`` argument to be callback shorthand values. Thanks DeviousStoat_! v7.0.4 (2023-06-02) ------------------- - Exclude incompatible ``typing-extensions`` version ``4.6.0`` from install requirements. Incompatibility was fixed in ``4.6.1``. v7.0.3 (2023-05-04) ------------------- - Fix typing for ``difference_by``, ``intersection_by``, ``union_by``, ``uniq_by``, and ``xor_by`` by allowing ``iteratee`` argument to be `Any`. Thanks DeviousStoat_! v7.0.2 (2023-04-27) ------------------- - Fix issue where using ``pyright`` as a type checker with ``reportPrivateUsage=true`` would report errors that objects are not exported from ``pydash``. Thanks DeviousStoat_! v7.0.1 (2023-04-13) ------------------- - Fix missing install dependency, ``typing-extensions``, for package. v7.0.0 (2023-04-11) ------------------- - Add type annotations to package. Raise an issue for any typing issues at https://github.com/dgilland/pydash/issues. Thanks DeviousStoat_! (**breaking change**) - Change behavior of ``to_dict`` to not using ``dict()`` internally. Previous behavior would be for something like ``to_dict([["k", "v"], ["x", "y"]])`` to return ``{"k": "v", "x": "y"}`` (equivalent to calling ``dict(...)``) but ``to_dict([["k"], ["v"], ["x"], ["y"]])`` would return ``{0: ["x"], 1: ["v"], 2: ["x"], 3: ["y"]}``. The new behavior is to always return iterables as dictionaries with their indexes as keys like ``{0: ["k", "v"], 1: ["x", "y"]}``. This is consistent with how iterable objects are iterated over and means that ``to_dict`` will have more reliable output. (**breaking change**) - Change behavior of ``slugify`` to remove single-quotes from output. Instead of ``slugify("the cat's meow") == "the-cat's-meow"``, the new behavior is to return ``"the-cats-meow"``. (**breaking change**) - Add support for negative indexes in ``get`` path keys. Thanks bl4ckst0ne_! v6.0.2 (2023-02-23) ------------------- - Only prevent access to object paths containing ``__globals__`` or ``__builtins__`` instead of all dunder-methods for non-dict/list objects. 
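The following sketch restates the v7.0.0 behavior changes described above using the inputs and outputs quoted in those entries; the ``a[-1]`` path syntax and sample data in the first assertion are illustrative assumptions.

.. code-block:: python

    import pydash

    # Negative indexes are accepted in ``get`` path keys (v7.0.0).
    assert pydash.get({"a": [1, 2, 3]}, "a[-1]") == 3

    # ``slugify`` strips single quotes as of v7.0.0.
    assert pydash.slugify("the cat's meow") == "the-cats-meow"

    # ``to_dict`` keys iterables by their indexes instead of calling ``dict()`` (v7.0.0).
    assert pydash.to_dict([["k", "v"], ["x", "y"]]) == {0: ["k", "v"], 1: ["x", "y"]}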
v6.0.1 (2023-02-20) ------------------- - Fix exception raised due to mishandling of non-string keys in functions like ``get()`` for non-dict/list objects that used integer index references like ``"[0]"``. v6.0.0 (2023-01-28) ------------------- - Prevent access to object paths containing dunder-methods in functions like ``get()`` for non-dict/list objects. Attempting to access dunder-methods using get-path keys will raise a ``KeyError`` (e.g. ``get(SomeClass(), '__init__'`` will raise). Access to dict keys are unaffected (e.g. ``get({"__init__": True}, "__init__")`` will return ``True``). (**breaking change**) - Add support for Python 3.11. - Drop support for Python 3.6 (**breaking change**) v5.1.2 (2022-11-30) ------------------- - Remove unnecessary type check and conversion for ``exceptions`` argument in ``pydash.retry``. v5.1.1 (2022-09-23) ------------------- - Add support for Python 3.10. - Fix timing assertion issue in test for ``pydash.delay`` where it could fail on certain environments. v5.1.0 (2021-10-02) ------------------- - Support matches-style callbacks on non-dictionary objects that are compatible with ``pydash.get`` in functions like ``pydash.find``. v5.0.2 (2021-07-15) ------------------- - Fix compatibility issue between ``pydash.py_`` / ``pydash._`` and ``typing.Protocol`` + ``typing.runtime_checkable`` that caused an exception to be raised for ``isinstance(py_, SomeRuntimeCheckableProtocol)``. v5.0.1 (2021-06-27) ------------------- - Fix bug in ``merge_with`` that prevented custom iteratee from being used when recursively merging. Thanks weineel_! v5.0.0 (2021-03-29) ------------------- - Drop support for Python 2.7. (**breaking change**) - Improve Unicode word splitting in string functions to be inline with Lodash. Thanks mervynlee94_! (**breaking change**) - ``camel_case`` - ``human_case`` - ``kebab_case`` - ``lower_case`` - ``pascal_case`` - ``separator_case`` - ``slugify`` - ``snake_case`` - ``start_case`` - ``upper_case`` - Optimize regular expression constants used in ``pydash.strings`` by pre-compiling them to regular expression pattern objects. v4.9.3 (2021-03-03) ------------------- - Fix regression introduced in ``v4.8.0`` that caused ``merge`` and ``merge_with`` to raise an exception when passing ``None`` as the first argument. v4.9.2 (2020-12-24) ------------------- - Fix regression introduced in ``v4.9.1`` that broke ``pydash.get`` for dictionaries and dot-delimited keys that reference integer dict-keys. v4.9.1 (2020-12-14) ------------------- - Fix bug in ``get/has`` that caused ``defaultdict`` objects to get populated on key access. v4.9.0 (2020-10-27) ------------------- - Add ``default_to_any``. Thanks gonzalonaveira_! - Fix mishandling of key names containing ``\.`` in ``set_``, ``set_with``, and ``update_with`` where the ``.`` was not treated as a literal value within the key name. Thanks zhaowb_! v4.8.0 (2020-06-13) ------------------- - Support attribute based setters like ``argparse.Namespace`` in ``set_``, ``set_with``, ``update``, and ``update_with``. - Fix exception in ``order_by`` when ``None`` used as a sort key. Thanks elijose55_! - Fix behavior of ``pick_by`` to return the passed in argument when only one argument given. Previously, an empty dictionary was returned. Thanks elijose55_! - Officially support Python 3.8. v4.7.6 (2019-11-20) ------------------- Bug Fixes +++++++++ - Fix handling of ``Sequence``, ``Mapping``, and ``namedtuple`` types in ``get`` so that their attributes aren't accessed during look-up. Thanks jwilson8767_! 
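As a small sketch of the object-access notes above (sample objects are made up): dict keys that merely look like dunders remain accessible per v6.0.0, and ``set_`` can assign onto attribute-based containers such as ``argparse.Namespace`` per v4.8.0.

.. code-block:: python

    import argparse

    import pydash

    # Dict keys that look like dunders are still ordinary keys (v6.0.0).
    assert pydash.get({"__init__": True}, "__init__") is True

    # ``set_`` supports attribute-based setters such as argparse.Namespace (v4.8.0).
    ns = argparse.Namespace()
    pydash.set_(ns, "debug", True)
    assert ns.debug is True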
v4.7.5 (2019-05-21) ------------------- Bug Fixes +++++++++ - Fix handling of string and byte values in ``clone_with`` and ``clone_deep_with`` when a customizer is used. - Fix handling of non-indexable iterables in ``find`` and ``find_last``. v4.7.4 (2018-11-14) ------------------- Bug Fixes +++++++++ - Raise an explicit exception in ``set_``, ``set_with``, ``update``, and ``update_with`` when an object cannot be updated due to that object or one of its nested objects not being subscriptable. v4.7.3 (2018-08-07) ------------------- Bug Fixes +++++++++ - Fix bug in ``spread`` where arguments were not being passed to wrapped function properly. v4.7.1 (2018-08-03) ------------------- New Features ++++++++++++ - Modify ``to_dict`` to first try to convert using ``dict()`` before falling back to using ``pydash.helpers.iterator()``. v4.7.0 (2018-07-26) ------------------- Misc ++++ - Internal code optimizations. v4.6.1 (2018-07-16) ------------------- Misc ++++ - Support Python 3.7. v4.6.0 (2018-07-10) ------------------- Misc ++++ - Improve performance of the following functions for large datasets: - ``duplicates`` - ``sorted_uniq`` - ``sorted_uniq_by`` - ``union`` - ``union_by`` - ``union_with`` - ``uniq`` - ``uniq_by`` - ``uniq_with`` - ``xor`` - ``xor_by`` - ``xor_with`` v4.5.0 (2018-03-20) ------------------- New Features ++++++++++++ - Add ``jitter`` argument to ``retry``. v4.4.1 (2018-03-14) ------------------- New Features ++++++++++++ - Add ``attempt`` argument to ``on_exception`` callback in ``retry``. New function signature is ``on_exception(exc, attempt)`` (previously was ``on_exception(exc)``). All arguments to ``on_exception`` callback are now optional. v4.4.0 (2018-03-13) ------------------- New Features ++++++++++++ - Add ``retry`` decorator that will retry a function multiple times if the function raises an exception. v4.3.3 (2018-03-02) ------------------- Bug Fixes +++++++++ - Fix regression in ``v4.3.2`` introduced by the support added for callable class callbacks that changed the handling of callbacks that could not be inspected. Prior to ``v4.3.2``, these callbacks would default to being passed a single callback argument, but with ``v4.3.2`` these callbacks would be passed the full set of callback arguments which could result an exception being raised due to the callback not supporting that many arguments. v4.3.2 (2018-02-06) ------------------- Bug Fixes +++++++++ - Fix issue in ``defaults_deep`` where sources with non-dict values would raise an exception due to assumption that object was always a dict. - Fix issue in ``curry`` where too many arguments would be passed to the curried function when evaluating function if too many arguments used in last function call. - Workaround issue in Python 2.7 where callable classes used as callbacks were always passed the full count of arguments even when the callable class only accept a subset of arguments. v4.3.1 (2017-12-19) ------------------- Bug Fixes +++++++++ - Fix ``set_with`` so that callable values are not called when being set. This bug also impacted the following functions by proxy: - ``pick`` - ``pick_by`` - ``set_`` - ``transpose`` - ``zip_object_deep`` v4.3.0 (2017-11-22) ------------------- New Features ++++++++++++ - Add ``nest``. - Wrap non-iterables in a list in ``to_list`` instead of raising an exception. Thanks efenka_! - Add ``split_strings`` argument to ``to_list`` to control whether strings are coverted to a list (``split_strings=True``) or wrapped in a list (``split_strings=False``). 
Default is ``split_strings=True``. Thanks efenka_! v4.2.1 (2017-09-08) ------------------- Bug Fixes +++++++++ - Ensure that ``to_path`` always returns a ``list``. - Fix ``get`` to work with path values other than just strings, integers, and lists. v4.2.0 (2017-09-08) ------------------- New Features ++++++++++++ - Support more iterator "hooks" in ``to_dict`` so non-iterators that expose an ``items()``, ``iteritems()``, or has ``__dict__`` attributes will be converted using those methods. - Support deep paths in ``omit`` and ``omit_by``. Thanks beck3905_! - Support deep paths in ``pick`` and ``pick_by``. Thanks beck3905_! Bug Fixes +++++++++ - Fix missing argument passing to matched function in ``cond``. - Support passing a single list of pairs in ``cond`` instead of just pairs as separate arguments. v4.1.0 (2017-06-09) ------------------- New Features ++++++++++++ - Officially support Python 3.6. - Add ``properties`` function that returns list of path values for an object. - Add ``replace_end``. - Add ``replace_start``. - Make ``iteratee`` support ``properties``-style callback when a ``tuple`` is passed. - Make ``replace`` accept ``from_start`` and ``from_end`` arguments to limit replacement to start and/or end of string. Bug Fixes +++++++++ - None v4.0.4 (2017-05-31) ------------------- New Features ++++++++++++ - None Bug Fixes +++++++++ - Improve performance of ``get``. Thanks shaunpatterson_! v4.0.3 (2017-04-20) ------------------- New Features ++++++++++++ - None Bug Fixes +++++++++ - Fix regression in ``get`` where ``list`` and ``dict`` objects had attributes returned when a key was missing but the key corresponded to an attribute name. For example, ``pydash.get({}, 'update')`` would return ``{}.update()`` instead of ``None``. Previous behavior was that only item-access was allowed for ``list`` and ``dict`` which has been restored. - Fix regression in ``invoke``/``invoke_map`` where non-attributes could be invoked. For example, ``pydash.invoke({'items': lambda: 1}, 'items')`` would return ``1`` instead of ``dict_items([('a', 'items')])``. Previous behavior was that only attribute methods could be invoked which has now been restored. v4.0.2 (2017-04-04) ------------------- New Features ++++++++++++ - None Bug Fixes +++++++++ - Fix regression in ``intersection``, ``intersection_by``, and ``intersection_with`` introduced in ``v4.0.0`` where the a single argument supplied to intersection should return the same argument value instead of an empty list. Backwards-Incompatibilities +++++++++++++++++++++++++++ - None v4.0.1 (2017-04-04) ------------------- New Features ++++++++++++ - Make ``property_`` work with deep path strings. Bug Fixes +++++++++ - Revert removal of ``deep_pluck`` and rename to ``pluck``. Previously, ``deep_pluck`` was removed and ``map_`` was recommended as a replacement. However, ``deep_pluck`` (now defined as ``pluck``) functionality is not supported by ``map_`` so the removal ``pluck`` was reverted. Backwards-Incompatibilities +++++++++++++++++++++++++++ - Remove ``property_deep`` (use ``property_``). .. _changelog-v4.0.0: v4.0.0 (2017-04-03) ------------------- New Features ++++++++++++ - Add ``assign_with``. - Add ``clamp``. - Add ``clone_deep_with``. - Add ``clone_with``. - Add ``cond``. Thanks bharadwajyarlagadda_! - Add ``conforms``. - Add ``conforms_to``. - Add ``default_to``. Thanks bharadwajyarlagadda_! - Add ``difference_by``. - Add ``difference_with``. - Add ``divide``. Thanks bharadwajyarlagadda_! - Add ``eq``. Thanks bharadwajyarlagadda_! 
- Add ``flat_map``. - Add ``flat_map_deep``. - Add ``flat_map_depth``. - Add ``flatten_depth``. - Add ``flip``. Thanks bharadwajyarlagadda_! - Add ``from_pairs``. Thanks bharadwajyarlagadda_! - Add ``intersection_by``. - Add ``intersection_with``. - Add ``invert_by``. - Add ``invoke_map``. - Add ``is_equal_with``. Thanks bharadwajyarlagadda_! - Add ``is_match_with``. - Add ``is_set``. Thanks bharadwajyarlagadda_! - Add ``lower_case``. Thanks bharadwajyarlagadda_! - Add ``lower_first``. Thanks bharadwajyarlagadda_! - Add ``max_by``. - Add ``mean_by``. - Add ``merge_with``. - Add ``min_by``. - Add ``multiply``. Thanks bharadwajyarlagadda_! - Add ``nth``. Thanks bharadwajyarlagadda_! - Add ``nth_arg``. Thanks bharadwajyarlagadda_! - Add ``omit_by``. - Add ``over``. Thanks bharadwajyarlagadda_! - Add ``over_every``. Thanks bharadwajyarlagadda_! - Add ``over_some``. Thanks bharadwajyarlagadda_! - Add ``pick_by``. - Add ``pull_all``. Thanks bharadwajyarlagadda_! - Add ``pull_all_by``. - Add ``pull_all_with``. - Add ``range_right``. Thanks bharadwajyarlagadda_! - Add ``sample_size``. Thanks bharadwajyarlagadda_! - Add ``set_with``. - Add ``sorted_index_by``. - Add ``sorted_index_of``. Thanks bharadwajyarlagadda_! - Add ``sorted_last_index_by``. - Add ``sorted_last_index_of``. - Add ``sorted_uniq``. Thanks bharadwajyarlagadda_! - Add ``sorted_uniq_by``. - Add ``stub_list``. Thanks bharadwajyarlagadda_! - Add ``stub_dict``. Thanks bharadwajyarlagadda_! - Add ``stub_false``. Thanks bharadwajyarlagadda_! - Add ``stub_string``. Thanks bharadwajyarlagadda_! - Add ``stub_true``. Thanks bharadwajyarlagadda_! - Add ``subtract``. Thanks bharadwajyarlagadda_! - Add ``sum_by``. - Add ``to_integer``. - Add ``to_lower``. Thanks bharadwajyarlagadda_! - Add ``to_path``. Thanks bharadwajyarlagadda_! - Add ``to_upper``. Thanks bharadwajyarlagadda_! - Add ``unary``. - Add ``union_by``. Thanks bharadwajyarlagadda_! - Add ``union_with``. Thanks bharadwajyarlagadda_! - Add ``uniq_by``. - Add ``uniq_with``. - Add ``unset``. - Add ``update``. - Add ``update_with``. - Add ``upper_case``. Thanks bharadwajyarlagadda_! - Add ``upper_first``. Thanks bharadwajyarlagadda_! - Add ``xor_by``. - Add ``xor_with``. - Add ``zip_object_deep``. - Make function returned by ``constant`` ignore extra arguments when called. - Make ``get`` support attribute access within path. - Make ``iteratee`` treat an integer argument as a string path (i.e. ``iteratee(1)`` is equivalent to ``iteratee('1')`` for creating a path accessor function). - Make ``intersection`` work with unhashable types. - Make ``range_`` support decrementing when ``start`` argument is greater than ``stop`` argument. - Make ``xor`` maintain sort order of supplied arguments. Bug Fixes +++++++++ - Fix ``find_last_key`` so that it iterates over object in reverse. Backwards-Incompatibilities +++++++++++++++++++++++++++ - Make ``add`` only support two argument addition. (**breaking change**) - Make ``difference`` return duplicate values from first argument and maintain sort order. (**breaking change**) - Make ``invoke`` work on objects instead of collections. Use ``invoke_map`` for collections. (**breaking change**) - Make ``set_`` support mixed ``list``/``dict`` defaults within a single object based on whether key or index path substrings used. (**breaking change**) - Make ``set_`` modify object in place. (**breaking change**) - Only use ``merge`` callback result if result is not ``None``. Previously, result from callback (if provided) was used unconditionally. 
(**breaking change**) - Remove functions: (**breaking change**) - ``deep_pluck`` (no alternative) [**UPDATE:** ``deep_pluck`` functionality restored as ``pluck`` in ``v4.0.1``] - ``mapiter`` (no alternative) - ``pluck`` (use ``map_``) - ``update_path`` (use ``update`` or ``update_with``) - ``set_path`` (use ``set_`` or ``set_with``) - Remove aliases: (**breaking change**) - ``all_`` (use ``every``) - ``any_`` (use ``some``) - ``append`` (use ``push``) - ``average`` and ``avg`` (use ``mean`` or ``mean_by``) - ``callback`` (use ``iteratee``) - ``cat`` (use ``concat``) - ``collect`` (use ``map_``) - ``contains`` (use ``includes``) - ``curve`` (use ``round_``) - ``deep_get`` and ``get_path`` (use ``get``) - ``deep_has`` and ``has_path`` (use ``has``) - ``deep_prop`` (use ``property_deep``) - ``deep_set`` (use ``set_``) - ``detect`` and ``find_where`` (use ``find``) - ``each`` (use ``for_each``) - ``each_right`` (use ``for_each_right``) - ``escape_re`` (use ``escape_reg_exp``) - ``explode`` (use ``split``) - ``extend`` (use ``assign``) - ``first`` (use ``head``) - ``foldl`` (use ``reduce``) - ``foldr`` (use ``reduce_right``) - ``for_own`` (use ``for_each``) - ``for_own_right`` (use ``for_each_right``) - ``implode`` (use ``join``) - ``is_bool`` (use ``is_boolean``) - ``is_int`` (use ``is_integer``) - ``is_native`` (use ``is_builtin``) - ``is_num`` (use ``is_number``) - ``is_plain_object`` (use ``is_dict``) - ``is_re`` (use ``is_reg_exp``) - ``js_match`` (use ``reg_exp_js_match``) - ``js_replace`` (use ``reg_exp_js_replace``) - ``keys_in`` (use ``keys``) - ``moving_average`` and ``moving_avg`` (use ``moving_mean``) - ``object_`` (use ``zip_object``) - ``pad_left`` (use ``pad_start``) - ``pad_right`` (use ``pad_end``) - ``pipe`` (use ``flow``) - ``pipe_right`` and ``compose`` (use ``flow_right``) - ``prop`` (use ``property_``) - ``prop_of`` (use ``property_of``) - ``pow_`` (use ``power``) - ``re_replace`` (use ``reg_exp_replace``) - ``rest`` (use ``tail``) - ``select`` (use ``filter_``) - ``sigma`` (use ``std_deviation``) - ``sort_by_all`` and ``sort_by_order`` (use ``order_by``) - ``trim_left`` (use ``trim_start``) - ``trim_right`` (use ``trim_right``) - ``trunc`` (use ``truncate``) - ``underscore_case`` (use ``snake_case``) - ``unique`` (use ``uniq``) - ``values_in`` (use ``values``) - ``where`` (use ``filter_``) - Rename functions: (**breaking change**) - ``deep_map_values`` to ``map_values_deep`` - ``deep_property`` to ``property_deep`` - ``include`` to ``includes`` - ``index_by`` to ``key_by`` - ``mod_args`` to ``over_args`` - ``moving_average`` to ``moving_mean`` - ``pairs`` to ``to_pairs`` - Remove ``callback`` argument from: (**breaking change**) - ``assign``. Moved to ``assign_with``. - ``clone`` and ``clone_deep``. Moved to ``clone_with`` and ``clone_deep_with``. - ``is_match``. Moved to ``is_match_with``. - ``max_`` and ``min_``. Moved to ``max_by`` and ``min_by``. - ``omit``. Moved to ``omit_by``. - ``pick``. Moved to ``pick_by``. - ``sorted_index``. Moved to ``sorted_index_by``. - ``sum_``. Moved to ``sum_by``. - ``uniq``/``unique``. Moved to ``uniq_by``. 
- Renamed ``callback`` argument to ``predicate``: (**breaking change**) - ``drop_right_while`` - ``drop_while`` - ``every`` - ``filter_`` - ``find`` - ``find_key`` - ``find_last`` - ``find_index`` - ``find_last_index`` - ``find_last_key`` - ``partition`` - ``reject`` - ``remove`` - ``some`` - ``take_right_while`` - ``take_while`` - Renamed ``callback`` argument to ``iteratee``: (**breaking change**) - ``count_by`` - ``duplicates`` - ``for_each`` - ``for_each_right`` - ``for_in`` - ``for_in_right`` - ``group_by`` - ``key_by`` - ``map_`` - ``map_keys`` - ``map_values`` - ``map_values_deep`` - ``mapcat`` - ``median`` - ``reduce_`` - ``reduce_right`` - ``reductions`` - ``reductions_right`` - ``sort_by`` - ``times`` - ``transform`` - ``unzip_with`` - ``zip_with`` - ``zscore`` - Rename ``comparison`` argument in ``sort`` to ``comparator``. - Rename ``index`` and ``how_many`` arguments in ``splice`` to ``start`` and ``count``. - Remove ``multivalue`` argument from ``invert``. Feature moved to ``invert_by``. (**breaking change**) v3.4.8 (2017-01-05) ------------------- - Make internal function inspection methods work with Python 3 annotations. Thanks tgriesser_! v3.4.7 (2016-11-01) ------------------- - Fix bug in ``get`` where an iterable default was iterated over instead of being returned when an object path wasn't found. Thanks urbnjamesmi1_! v3.4.6 (2016-10-31) ------------------- - Fix bug in ``get`` where casting a string key to integer resulted in an uncaught exception instead of the default value being returned instead. Thanks urbnjamesmi1_! v3.4.5 (2016-10-16) ------------------- - Add optional ``default`` parameter to ``min_`` and ``max_`` functions that is used when provided iterable is empty. - Fix bug in ``is_match`` where comparison between an empty ``source`` argument returned ``None`` instead of ``True``. v3.4.4 (2016-09-06) ------------------- - Shallow copy each source in ``assign``/``extend`` instead of deep copying. - Call ``copy.deepcopy`` in ``merge`` instead of the more resource intensive ``clone_deep``. v3.4.3 (2016-04-07) ------------------- - Fix minor issue in deep path string parsing so that list indexing in paths can be specified as ``foo[0][1].bar`` instead of ``foo.[0].[1].bar``. Both formats are now supported. v3.4.2 (2016-03-24) ------------------- - Fix bug in ``start_case`` where capitalized characters after the first character of a word where mistakenly cast to lower case. v3.4.1 (2015-11-03) ------------------- - Fix Python 3.5, inspect, and pytest compatibility issue with ``py_`` chaining object when doctest run on ``pydash.__init__.py``. v3.4.0 (2015-09-22) ------------------- - Optimize callback system for performance. - Explicitly store arg count on callback for ``pydash`` generated callbacks where the arg count is known. This avoids the costly ``inspect.getargspec`` call. - Eliminate usage of costly ``guess_builtin_argcount`` which parsed docstrings, and instead only ever pass a single argument to a builtin callback function. - Optimize ``get``/``set`` so that regex parsing is only done when special characters are contained in the path key whereas before, all string paths were parsed. - Optimize ``is_builtin`` by checking for ``BuiltinFunctionType`` instance and then using ``dict`` look up table instead of a ``list`` look up. - Optimize ``is_match`` by replacing call to ``has`` with a ``try/except`` block. - Optimize ``push``/``append`` by using a native loop instead of callback mapping. v3.3.0 (2015-07-23) ------------------- - Add ``ceil``. 
- Add ``defaults_deep``. - Add ``floor``. - Add ``get``. - Add ``gt``. - Add ``gte``. - Add ``is_iterable``. - Add ``lt``. - Add ``lte``. - Add ``map_keys``. - Add ``method``. - Add ``method_of``. - Add ``mod_args``. - Add ``set_``. - Add ``unzip_with``. - Add ``zip_with``. - Make ``add`` support adding two numbers if passed in positionally. - Make ``get`` main definition and ``get_path`` its alias. - Make ``set_`` main definition and ``deep_set`` its alias. v3.2.2 (2015-04-29) ------------------- - Catch ``AttributeError`` in ``helpers.get_item`` and return default value if set. v3.2.1 (2015-04-29) ------------------- - Fix bug in ``reduce_right`` where collection was not reversed correctly. v3.2.0 (2015-03-03) ------------------- - Add ``sort_by_order`` as alias of ``sort_by_all``. - Fix ``is_match`` to not compare ``obj`` and ``source`` types using ``type`` and instead use ``isinstance`` comparisons exclusively. - Make ``sort_by_all`` accept an ``orders`` argument for specifying the sort order of each key via boolean ``True`` (for ascending) and ``False`` (for descending). - Make ``words`` accept a ``pattern`` argument to override the default regex used for splitting words. - Make ``words`` handle single character words better. v3.1.0 (2015-02-28) ------------------- - Add ``fill``. - Add ``in_range``. - Add ``matches_property``. - Add ``spread``. - Add ``start_case``. - Make callbacks support ``matches_property`` style as ``[key, value]`` or ``(key, value)``. - Make callbacks support shallow ``property`` style callbacks as ``[key]`` or ``(key,)``. .. _changelog-v3.0.0: v3.0.0 (2015-02-25) ------------------- - Add ``ary``. - Add ``chars``. - Add ``chop``. - Add ``chop_right``. - Add ``clean``. - Add ``commit`` method to ``chain`` that returns a new chain with the computed ``chain.value()`` as the initial value of the chain. - Add ``count_substr``. - Add ``decapitalize``. - Add ``duplicates``. - Add ``has_substr``. - Add ``human_case``. - Add ``insert_substr``. - Add ``is_blank``. - Add ``is_bool`` as alias of ``is_boolean``. - Add ``is_builtin``, ``is_native``. - Add ``is_dict`` as alias of ``is_plain_object``. - Add ``is_int`` as alias of ``is_integer``. - Add ``is_match``. - Add ``is_num`` as alias of ``is_number``. - Add ``is_tuple``. - Add ``join`` as alias of ``implode``. - Add ``lines``. - Add ``number_format``. - Add ``pascal_case``. - Add ``plant`` method to ``chain`` that returns a cloned chain with a new initial value. - Add ``predecessor``. - Add ``property_of``, ``prop_of``. - Add ``prune``. - Add ``re_replace``. - Add ``rearg``. - Add ``replace``. - Add ``run`` as alias of ``chain.value``. - Add ``separator_case``. - Add ``series_phrase``. - Add ``series_phrase_serial``. - Add ``slugify``. - Add ``sort_by_all``. - Add ``strip_tags``. - Add ``substr_left``. - Add ``substr_left_end``. - Add ``substr_right``. - Add ``substr_right_end``. - Add ``successor``. - Add ``swap_case``. - Add ``title_case``. - Add ``truncate`` as alias of ``trunc``. - Add ``to_boolean``. - Add ``to_dict``, ``to_plain_object``. - Add ``to_number``. - Add ``underscore_case`` as alias of ``snake_case``. - Add ``unquote``. - Fix ``deep_has`` to return ``False`` when ``ValueError`` raised during path checking. - Fix ``pad`` so that it doesn't over pad beyond provided length. - Fix ``trunc``/``truncate`` so that they handle texts shorter than the max string length correctly. - Make the following functions work with empty strings and ``None``: (**breaking change**) Thanks k7sleeper_! 
- ``camel_case`` - ``capitalize`` - ``chars`` - ``chop`` - ``chop_right`` - ``class_case`` - ``clean`` - ``count_substr`` - ``decapitalize`` - ``ends_with`` - ``join`` - ``js_replace`` - ``kebab_case`` - ``lines`` - ``quote`` - ``re_replace`` - ``replace`` - ``series_phrase`` - ``series_phrase_serial`` - ``starts_with`` - ``surround`` - Make callback invocation have better support for builtin functions and methods. Previously, if one wanted to pass a builtin function or method as a callback, it had to be wrapped in a lambda which limited the number of arguments that would be passed it. For example, ``_.each([1, 2, 3], array.append)`` would fail and would need to be converted to ``_.each([1, 2, 3], lambda item: array.append(item)``. That is no longer the case as the non-wrapped method is now supported. - Make ``capitalize`` accept ``strict`` argument to control whether to convert the rest of the string to lower case or not. Defaults to ``True``. - Make ``chain`` support late passing of initial ``value`` argument. - Make ``chain`` not store computed ``value()``. (**breaking change**) - Make ``drop``, ``drop_right``, ``take``, and ``take_right`` have default ``n=1``. - Make ``is_indexed`` return ``True`` for tuples. - Make ``partial`` and ``partial_right`` accept keyword arguments. - Make ``pluck`` style callbacks support deep paths. (**breaking change**) - Make ``re_replace`` accept non-string arguments. - Make ``sort_by`` accept ``reverse`` parameter. - Make ``splice`` work with strings. - Make ``to_string`` convert ``None`` to empty string. (**breaking change**) - Move ``arrays.join`` to ``strings.join``. (**breaking change**) - Rename ``join``/``implode``'s second parameter from ``delimiter`` to ``separator``. (**breaking change**) - Rename ``split``/``explode``'s second parameter from ``delimiter`` to ``separator``. (**breaking change**) - Reorder function arguments for ``after`` from ``(n, func)`` to ``(func, n)``. (**breaking change**) - Reorder function arguments for ``before`` from ``(n, func)`` to ``(func, n)``. (**breaking change**) - Reorder function arguments for ``times`` from ``(n, callback)`` to ``(callback, n)``. (**breaking change**) - Reorder function arguments for ``js_match`` from ``(reg_exp, text)`` to ``(text, reg_exp)``. (**breaking change**) - Reorder function arguments for ``js_replace`` from ``(reg_exp, text, repl)`` to ``(text, reg_exp, repl)``. (**breaking change**) - Support iteration over class instance properties for non-list, non-dict, and non-iterable objects. v2.4.2 (2015-02-03) ------------------- - Fix ``remove`` so that array is modified after callback iteration. v2.4.1 (2015-01-11) ------------------- - Fix ``kebab_case`` so that it casts string to lower case. v2.4.0 (2015-01-07) ------------------- - Add ``ensure_ends_with``. Thanks k7sleeper_! - Add ``ensure_starts_with``. Thanks k7sleeper_! - Add ``quote``. Thanks k7sleeper_! - Add ``surround``. Thanks k7sleeper_! v2.3.2 (2014-12-10) ------------------- - Fix ``merge`` and ``assign``/``extend`` so they apply ``clone_deep`` to source values before assigning to destination object. - Make ``merge`` accept a callback as a positional argument if it is last. v2.3.1 (2014-12-07) ------------------- - Add ``pipe`` and ``pipe_right`` as aliases of ``flow`` and ``flow_right``. - Fix ``merge`` so that trailing ``{}`` or ``[]`` don't overwrite previous source values. - Make ``py_`` an alias for ``_``. v2.3.0 (2014-11-10) ------------------- - Support ``type`` callbacks (e.g. ``int``, ``float``, ``str``, etc.) 
by only passing a single callback argument when invoking the callback. - Drop official support for Python 3.2. Too many testing dependencies no longer work on it. v2.2.0 (2014-10-28) ------------------- - Add ``append``. - Add ``deep_get``. - Add ``deep_has``. - Add ``deep_map_values``. - Add ``deep_set``. - Add ``deep_pluck``. - Add ``deep_property``. - Add ``join``. - Add ``pop``. - Add ``push``. - Add ``reverse``. - Add ``shift``. - Add ``sort``. - Add ``splice``. - Add ``unshift``. - Add ``url``. - Fix bug in ``snake_case`` that resulted in returned string not being converted to lower case. - Fix bug in chaining method access test which skipped the actual test. - Make ``_`` instance alias method access to methods with a trailing underscore in their name. For example, ``_.map()`` becomes an alias for ``map_()``. - Make ``deep_prop`` an alias of ``deep_property``. - Make ``has`` work with deep paths. - Make ``has_path`` an alias of ``deep_has``. - Make ``get_path`` handle escaping the ``.`` delimiter for string keys. - Make ``get_path`` handle list indexing using strings such as ``'0.1.2'`` to access ``'value'`` in ``[[0, [0, 0, 'value']]]``. - Make ``concat`` an alias of ``cat``. v2.1.0 (2014-09-17) ------------------- - Add ``add``, ``sum_``. - Add ``average``, ``avg``, ``mean``. - Add ``mapiter``. - Add ``median``. - Add ``moving_average``, ``moving_avg``. - Add ``power``, ``pow_``. - Add ``round_``, ``curve``. - Add ``scale``. - Add ``slope``. - Add ``std_deviation``, ``sigma``. - Add ``transpose``. - Add ``variance``. - Add ``zscore``. .. _changelog-v2.0.0: v2.0.0 (2014-09-11) ------------------- - Add ``_`` instance that supports both method chaining and module method calling. - Add ``cat``. - Add ``conjoin``. - Add ``deburr``. - Add ``disjoin``. - Add ``explode``. - Add ``flatten_deep``. - Add ``flow``. - Add ``flow_right``. - Add ``get_path``. - Add ``has_path``. - Add ``implode``. - Add ``intercalate``. - Add ``interleave``. - Add ``intersperse``. - Add ``is_associative``. - Add ``is_even``. - Add ``is_float``. - Add ``is_decreasing``. - Add ``is_increasing``. - Add ``is_indexed``. - Add ``is_instance_of``. - Add ``is_integer``. - Add ``is_json``. - Add ``is_monotone``. - Add ``is_negative``. - Add ``is_odd``. - Add ``is_positive``. - Add ``is_strictly_decreasing``. - Add ``is_strictly_increasing``. - Add ``is_zero``. - Add ``iterated``. - Add ``js_match``. - Add ``js_replace``. - Add ``juxtapose``. - Add ``mapcat``. - Add ``reductions``. - Add ``reductions_right``. - Add ``rename_keys``. - Add ``set_path``. - Add ``split_at``. - Add ``thru``. - Add ``to_string``. - Add ``update_path``. - Add ``words``. - Make callback function calling adapt to argspec of given callback function. If, for example, the full callback signature is ``(item, index, obj)`` but the passed in callback only supports ``(item)``, then only ``item`` will be passed in when callback is invoked. Previously, callbacks had to support all arguments or implement star-args. - Make ``chain`` lazy and only compute the final value when ``value`` called. - Make ``compose`` an alias of ``flow_right``. - Make ``flatten`` shallow by default, remove callback option, and add ``is_deep`` option. (**breaking change**) - Make ``is_number`` return ``False`` for boolean ``True`` and ``False``. (**breaking change**) - Make ``invert`` accept ``multivalue`` argument. - Make ``result`` accept ``default`` argument. - Make ``slice_`` accept optional ``start`` and ``end`` arguments. - Move files in ``pydash/api/`` to ``pydash/``. 
(**breaking change**) - Move predicate functions from ``pydash.api.objects`` to ``pydash.api.predicates``. (**breaking change**) - Rename ``create_callback`` to ``iteratee``. (**breaking change**) - Rename ``functions`` to ``callables`` in order to allow ``functions.py`` to exist at the root of the pydash module folder. (**breaking change**) - Rename *private* utility function ``_iter_callback`` to ``itercallback``. (**breaking change**) - Rename *private* utility function ``_iter_list_callback`` to ``iterlist_callback``. (**breaking change**) - Rename *private* utility function ``_iter_dict_callback`` to ``iterdict_callback``. (**breaking change**) - Rename *private* utility function ``_iterate`` to ``iterator``. (**breaking change**) - Rename *private* utility function ``_iter_dict`` to ``iterdict``. (**breaking change**) - Rename *private* utility function ``_iter_list`` to ``iterlist``. (**breaking change**) - Rename *private* utility function ``_iter_unique`` to ``iterunique``. (**breaking change**) - Rename *private* utility function ``_get_item`` to ``getitem``. (**breaking change**) - Rename *private* utility function ``_set_item`` to ``setitem``. (**breaking change**) - Rename *private* utility function ``_deprecated`` to ``deprecated``. (**breaking change**) - Undeprecate ``tail`` and make alias of ``rest``. v1.1.0 (2014-08-19) ------------------- - Add ``attempt``. - Add ``before``. - Add ``camel_case``. - Add ``capitalize``. - Add ``chunk``. - Add ``curry_right``. - Add ``drop_right``. - Add ``drop_right_while``. - Add ``drop_while``. - Add ``ends_with``. - Add ``escape_reg_exp`` and ``escape_re``. - Add ``is_error``. - Add ``is_reg_exp`` and ``is_re``. - Add ``kebab_case``. - Add ``keys_in`` as alias of ``keys``. - Add ``negate``. - Add ``pad``. - Add ``pad_left``. - Add ``pad_right``. - Add ``partition``. - Add ``pull_at``. - Add ``repeat``. - Add ``slice_``. - Add ``snake_case``. - Add ``sorted_last_index``. - Add ``starts_with``. - Add ``take_right``. - Add ``take_right_while``. - Add ``take_while``. - Add ``trim``. - Add ``trim_left``. - Add ``trim_right``. - Add ``trunc``. - Add ``values_in`` as alias of ``values``. - Create ``pydash.api.strings`` module. - Deprecate ``tail``. - Modify ``drop`` to accept ``n`` argument and remove as alias of ``rest``. - Modify ``take`` to accept ``n`` argument and remove as alias of ``first``. - Move ``escape`` and ``unescape`` from ``pydash.api.utilities`` to ``pydash.api.strings``. (**breaking change**) - Move ``range_`` from ``pydash.api.arrays`` to ``pydash.api.utilities``. (**breaking change**) .. _changelog-v1.0.0: v1.0.0 (2014-08-05) ------------------- - Add Python 2.6 and Python 3 support. - Add ``after``. - Add ``assign`` and ``extend``. Thanks nathancahill_! - Add ``callback`` and ``create_callback``. - Add ``chain``. - Add ``clone``. - Add ``clone_deep``. - Add ``compose``. - Add ``constant``. - Add ``count_by``. Thanks nathancahill_! - Add ``curry``. - Add ``debounce``. - Add ``defaults``. Thanks nathancahill_! - Add ``delay``. - Add ``escape``. - Add ``find_key``. Thanks nathancahill_! - Add ``find_last``. Thanks nathancahill_! - Add ``find_last_index``. Thanks nathancahill_! - Add ``find_last_key``. Thanks nathancahill_! - Add ``for_each``. Thanks nathancahill_! - Add ``for_each_right``. Thanks nathancahill_! - Add ``for_in``. Thanks nathancahill_! - Add ``for_in_right``. Thanks nathancahill_! - Add ``for_own``. Thanks nathancahill_! - Add ``for_own_right``. Thanks nathancahill_! - Add ``functions_`` and ``methods``. 
Thanks nathancahill_! - Add ``group_by``. Thanks nathancahill_! - Add ``has``. Thanks nathancahill_! - Add ``index_by``. Thanks nathancahill_! - Add ``identity``. - Add ``inject``. - Add ``invert``. - Add ``invoke``. Thanks nathancahill_! - Add ``is_list``. Thanks nathancahill_! - Add ``is_boolean``. Thanks nathancahill_! - Add ``is_empty``. Thanks nathancahill_! - Add ``is_equal``. - Add ``is_function``. Thanks nathancahill_! - Add ``is_none``. Thanks nathancahill_! - Add ``is_number``. Thanks nathancahill_! - Add ``is_object``. - Add ``is_plain_object``. - Add ``is_string``. Thanks nathancahill_! - Add ``keys``. - Add ``map_values``. - Add ``matches``. - Add ``max_``. Thanks nathancahill_! - Add ``memoize``. - Add ``merge``. - Add ``min_``. Thanks nathancahill_! - Add ``noop``. - Add ``now``. - Add ``omit``. - Add ``once``. - Add ``pairs``. - Add ``parse_int``. - Add ``partial``. - Add ``partial_right``. - Add ``pick``. - Add ``property_`` and ``prop``. - Add ``pull``. Thanks nathancahill_! - Add ``random``. - Add ``reduce_`` and ``foldl``. - Add ``reduce_right`` and ``foldr``. - Add ``reject``. Thanks nathancahill_! - Add ``remove``. - Add ``result``. - Add ``sample``. - Add ``shuffle``. - Add ``size``. - Add ``sort_by``. Thanks nathancahill_! - Add ``tap``. - Add ``throttle``. - Add ``times``. - Add ``transform``. - Add ``to_list``. Thanks nathancahill_! - Add ``unescape``. - Add ``unique_id``. - Add ``values``. - Add ``wrap``. - Add ``xor``. .. _changelog-v0.0.0: v0.0.0 (2014-07-22) ------------------- - Add ``all_``. - Add ``any_``. - Add ``at``. - Add ``bisect_left``. - Add ``collect``. - Add ``collections``. - Add ``compact``. - Add ``contains``. - Add ``detect``. - Add ``difference``. - Add ``drop``. - Add ``each``. - Add ``each_right``. - Add ``every``. - Add ``filter_``. - Add ``find``. - Add ``find_index``. - Add ``find_where``. - Add ``first``. - Add ``flatten``. - Add ``head``. - Add ``include``. - Add ``index_of``. - Add ``initial``. - Add ``intersection``. - Add ``last``. - Add ``last_index_of``. - Add ``map_``. - Add ``object_``. - Add ``pluck``. - Add ``range_``. - Add ``rest``. - Add ``select``. - Add ``some``. - Add ``sorted_index``. - Add ``tail``. - Add ``take``. - Add ``union``. - Add ``uniq``. - Add ``unique``. - Add ``unzip``. - Add ``where``. - Add ``without``. - Add ``zip_``. - Add ``zip_object``. .. _nathancahill: https://github.com/nathancahill .. _k7sleeper: https://github.com/k7sleeper .. _bharadwajyarlagadda: https://github.com/bharadwajyarlagadda .. _urbnjamesmi1: https://github.com/urbnjamesmi1 .. _tgriesser: https://github.com/tgriesser .. _shaunpatterson: https://github.com/shaunpatterson .. _beck3905: https://github.com/beck3905 .. _efenka: https://github.com/efenka .. _jwilson8767: https://github.com/jwilson8767 .. _elijose55: https://github.com/elijose55 .. _gonzalonaveira: https://github.com/gonzalonaveira .. _zhaowb: https://github.com/zhaowb .. _mervynlee94: https://github.com/mervynlee94 .. _weineel: https://github.com/weineel .. _bl4ckst0ne: https://github.com/bl4ckst0ne .. _DeviousStoat: https://github.com/DeviousStoat pydash-8.0.3/CONTRIBUTING.rst000066400000000000000000000050251464745015500154700ustar00rootroot00000000000000Contributing ============ Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. You can contribute in many ways: Types of Contributions ---------------------- Report Bugs +++++++++++ Report bugs at https://github.com/dgilland/pydash. 
If you are reporting a bug, please include: - Your operating system name and version. - Any details about your local setup that might be helpful in troubleshooting. - Detailed steps to reproduce the bug. Fix Bugs ++++++++ Look through the GitHub issues for bugs. Anything tagged with "bug" is open to whoever wants to implement it. Implement Features ++++++++++++++++++ Look through the GitHub issues for features. Anything tagged with "enhancement" or "help wanted" is open to whoever wants to implement it. Write Documentation +++++++++++++++++++ pydash could always use more documentation, whether as part of the official pydash docs, in docstrings, or even on the web in blog posts, articles, and such. Submit Feedback +++++++++++++++ The best way to send feedback is to file an issue at https://github.com/dgilland/pydash. If you are proposing a feature: - Explain in detail how it would work. - Keep the scope as narrow as possible, to make it easier to implement. - Remember that this is a volunteer-driven project, and that contributions are welcome :) Get Started! ------------ Ready to contribute? Here's how to set up ``pydash`` for local development. 1. Fork the ``pydash`` repo on GitHub. 2. Clone your fork locally:: $ git clone git@github.com:your_username_here/pydash.git 3. Install Python dependencies into a virtualenv:: $ cd pydash $ pip install -r requirements.txt 4. Create a branch for local development:: $ git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. 5. Autoformat code:: $ inv fmt 6. When you're done making changes, check that your changes pass all unit tests by testing with ``tox`` across all supported Python versions:: $ tox 7. Add yourself to ``AUTHORS.rst``. 8. Commit your changes and push your branch to GitHub:: $ git add . $ git commit -m "" $ git push origin name-of-your-bugfix-or-feature-branch 9. Submit a pull request through GitHub. Pull Request Guidelines ----------------------- Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. The pull request should work for all versions Python that this project supports. pydash-8.0.3/DEVGUIDE.rst000066400000000000000000000073721464745015500147640ustar00rootroot00000000000000Developer Guide =============== This guide provides an overview of the tooling this project uses and how to execute developer workflows using the developer CLI. Python Environments ------------------- This Python project is tested against different Python versions. For local development, it is a good idea to have those versions installed so that tests can be run against each. There are libraries that can help with this. Which tools to use is largely a matter of preference, but below are a few recommendations. For managing multiple Python versions: - pyenv_ - OS package manager (e.g. 
apt, yum, homebrew, etc) - Build from source For managing Python virtualenvs: - pyenv-virtualenv_ - pew_ - python-venv_ Tooling ------- The following tools are used by this project: ============= ========================== ================== Tool Description Configuration ============= ========================== ================== black_ Code formatter ``pyproject.toml`` isort_ Import statement formatter ``setup.cfg`` docformatter_ Docstring formatter ``setup.cfg`` flake8_ Code linter ``setup.cfg`` pylint_ Code linter ``pylintrc`` mypy_ Type checker ``setup.cfg`` pytest_ Test framework ``setup.cfg`` tox_ Test environment manager ``tox.ini`` invoke_ CLI task execution library ``tasks.py`` ============= ========================== ================== Workflows --------- The following workflows use developer CLI commands via `invoke`_ and are defined in ``tasks.py``. Autoformat Code +++++++++++++++ To run all autoformatters: :: inv fmt This is the same as running each autoformatter individually: :: inv black inv isort inv docformatter Lint ++++ To run all linters: :: inv lint This is the same as running each linter individually: :: inv flake8 inv pylint inv mypy Test ++++ To run all unit tests: :: inv unit To run unit tests and builds: :: inv test Test on All Supported Python Versions +++++++++++++++++++++++++++++++++++++ To run tests on all supported Python versions: :: tox This requires that the supported versions are available on the PATH. Build Package +++++++++++++ To build the package: :: inv build This will output the source and binary distributions under ``dist/``. Build Docs ++++++++++ To build documentation: :: inv docs This will output the documentation under ``docs/_build/``. Serve Docs ++++++++++ To serve docs over HTTP: :: inv docs -s|--server [-b|--bind 127.0.0.1] [-p|--port 8000] inv docs -s inv docs -s -p 8080 inv docs -s -b 0.0.0.0 -p 8080 Delete Build Files ++++++++++++++++++ To remove all build and temporary files: :: inv clean This will remove Python bytecode files, egg files, build output folders, caches, and tox folders. Release Package +++++++++++++++ To release a new version of the package to https://pypi.org: :: inv release CI/CD ----- This project uses `Github Actions `_ for CI/CD: - https://github.com/dgilland/pydash/actions .. _pyenv: https://github.com/pyenv/pyenv .. _pyenv-virtualenv: https://github.com/pyenv/pyenv-virtualenv .. _pew: https://github.com/berdario/pew .. _python-venv: https://docs.python.org/3/library/venv.html .. _black: https://black.readthedocs.io .. _isort: https://pycqa.github.io/isort/ .. _docformatter: https://github.com/myint/docformatter .. _flake8: https://flake8.pycqa.org .. _pylint: https://www.pylint.org/ .. _mypy: http://mypy-lang.org/ .. _pytest: https://docs.pytest.org .. _tox: https://tox.readthedocs.io .. 
_invoke: http://docs.pyinvoke.org pydash-8.0.3/LICENSE.rst000066400000000000000000000020601464745015500146370ustar00rootroot00000000000000MIT License Copyright (c) 2020 Derrick Gilland Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. pydash-8.0.3/MANIFEST.in000066400000000000000000000004331464745015500145630ustar00rootroot00000000000000graft src graft tests graft docs include AUTHORS.rst include CONTRIBUTING.rst include CHANGELOG.rst include LICENSE.rst include README.rst include requirements.txt include tox.ini include pylintrc include tasks.py global-include py.typed global-exclude *.py[cod] __pycache__ *.so pydash-8.0.3/README.rst000066400000000000000000000023561464745015500145220ustar00rootroot00000000000000pydash ****** |version| |build| |coveralls| |license| The kitchen sink of Python utility libraries for doing "stuff" in a functional way. Based on the `Lo-Dash `_ Javascript library. Note ==== Looking for a library that is more memory efficient and better suited for large datasets? Check out `fnc `_! It's built around generators and iteration and has iteratee-first function signatures. Links ===== - Project: https://github.com/dgilland/pydash - Documentation: http://pydash.readthedocs.org - PyPi: https://pypi.python.org/pypi/pydash/ - Github Actions: https://github.com/dgilland/pydash/actions .. |version| image:: http://img.shields.io/pypi/v/pydash.svg?style=flat-square :target: https://pypi.python.org/pypi/pydash/ .. |build| image:: https://img.shields.io/github/actions/workflow/status/dgilland/pydash/main.yml?branch=master&style=flat-square :target: https://github.com/dgilland/pydash/actions .. |coveralls| image:: http://img.shields.io/coveralls/dgilland/pydash/master.svg?style=flat-square :target: https://coveralls.io/r/dgilland/pydash .. |license| image:: http://img.shields.io/pypi/l/pydash.svg?style=flat-square :target: https://pypi.python.org/pypi/pydash/ pydash-8.0.3/docs/000077500000000000000000000000001464745015500137555ustar00rootroot00000000000000pydash-8.0.3/docs/Makefile000066400000000000000000000075411464745015500154240ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build PAPER ?= SOURCEDIR = . BUILDDIR = _build # Internal variables. 
PAPEROPT_a4 = -D latex_elements.papersize=a4 PAPEROPT_letter = -D latex_elements.papersize=letter # $(O) is meant as a shortcut for $(SPHINXOPTS) ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(O) $(SOURCEDIR) # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(O) $(SOURCEDIR) .PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and an HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " lualatexpdf to make LaTeX files and run them through lualatex" @echo " xelatexpdf to make LaTeX files and run them through xelatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" @echo " dummy to check syntax errors of document sources" .PHONY: clean clean: rm -rf $(BUILDDIR)/* .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: lualatexpdf lualatexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through lualatex..." $(MAKE) PDFLATEX=lualatex -C $(BUILDDIR)/latex all-pdf @echo "lualatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: xelatexpdf xelatexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through xelatex..." $(MAKE) PDFLATEX=xelatex -C $(BUILDDIR)/latex all-pdf @echo "xelatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 
.PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale # Catch-all target: route all unknown targets to Sphinx .PHONY: Makefile %: Makefile $(SPHINXBUILD) -b "$@" $(ALLSPHINXOPTS) "$(BUILDDIR)/$@" pydash-8.0.3/docs/_templates/000077500000000000000000000000001464745015500161125ustar00rootroot00000000000000pydash-8.0.3/docs/_templates/.gitignore000066400000000000000000000000001464745015500200700ustar00rootroot00000000000000pydash-8.0.3/docs/api.rst000066400000000000000000000055051464745015500152650ustar00rootroot00000000000000.. _api: ************* API Reference ************* .. testsetup:: import math import operator import re from pydash.functions import Curry, CurryRight from pydash import * All public functions are available from the main module. .. code-block:: python import pydash pydash. This is the recommended way to use pydash. .. code-block:: python # OK (importing main module) import pydash pydash.where({}) # OK (import from main module) from pydash import where where({}) # NOT RECOMMENDED (importing from submodule) from pydash.collections import where Only the main pydash module API is guaranteed to adhere to semver. It's possible that backwards incompatibility outside the main module API could be broken between minor releases. .. _api-dash-instance: py\_ Instance ============= There is a special ``py_`` instance available from ``pydash`` that supports method calling and method chaining from a single object: .. code-block:: python from pydash import py_ # Method calling py_.initial([1, 2, 3, 4, 5]) == [1, 2, 3, 4] # Method chaining py_([1, 2, 3, 4, 5]).initial().value() == [1, 2, 3, 4] # Method aliasing to underscore suffixed methods that shadow builtin names py_.map is py_.map_ py_([1, 2, 3]).map(_.to_string).value() == py_([1, 2, 3]).map_(_.to_string).value() The ``py_`` instance is basically a combination of using ``pydash.`` and ``pydash.chain``. A full listing of aliased ``py_`` methods: - ``_.object`` is :func:`pydash.arrays.object_` - ``_.slice`` is :func:`pydash.arrays.slice_` - ``_.zip`` is :func:`pydash.arrays.zip_` - ``_.all`` is :func:`pydash.collections.all_` - ``_.any`` is :func:`pydash.collections.any_` - ``_.filter`` is :func:`pydash.collections.filter_` - ``_.map`` is :func:`pydash.collections.map_` - ``_.max`` is :func:`pydash.collections.max_` - ``_.min`` is :func:`pydash.collections.min_` - ``_.reduce`` is :func:`pydash.collections.reduce_` - ``_.pow`` is :func:`pydash.numerical.pow_` - ``_.round`` is :func:`pydash.numerical.round_` - ``_.sum`` is :func:`pydash.numerical.sum_` - ``_.property`` is :func:`pydash.utilities.property_` - ``_.range`` is :func:`pydash.utilities.range_` Arrays ====== .. automodule:: pydash.arrays :members: Chaining ======== .. automodule:: pydash.chaining :members: Collections =========== .. automodule:: pydash.collections :members: Functions ========= .. automodule:: pydash.functions :members: Numerical ========= .. automodule:: pydash.numerical :members: Objects ======= .. automodule:: pydash.objects :members: Predicates ========== .. automodule:: pydash.predicates :members: Strings ======= .. automodule:: pydash.strings :members: Utilities ========= .. automodule:: pydash.utilities :members: pydash-8.0.3/docs/authors.rst000066400000000000000000000000341464745015500161710ustar00rootroot00000000000000.. include:: ../AUTHORS.rst pydash-8.0.3/docs/callbacks.rst000066400000000000000000000077411464745015500164370ustar00rootroot00000000000000Callbacks ========= .. 
testsetup:: import pydash For functions that support callbacks, there are several callback styles that can be used. Callable Style -------------- The most straight-forward callback is a regular callable object. For pydash functions that pass multiple arguments to their callback, the callable's argument signature does not need to support all arguments. Pydash's callback system will try to infer the number of supported arguments of the callable and only pass those arguments to the callback. However, there may be some edge cases where this will fail in which case one will need to wrap the callable in a ``lambda`` or ``def ...`` style function. The arguments passed to most callbacks are: .. code-block:: python callback(item, index, obj) where ``item`` is an element of ``obj``, ``index`` is the ``dict`` or ``list`` index, and ``obj`` is the original object being passed in. But not all callbacks support these arguments. Some functions support fewer callback arguments. See :ref:`API Reference ` for more details. .. doctest:: >>> users = [ ... {'name': 'Michelangelo', 'active': False}, ... {'name': 'Donatello', 'active': False}, ... {'name': 'Leonardo', 'active': True} ... ] # Single argument callback. >>> callback = lambda item: item['name'] == 'Donatello' >>> pydash.find_index(users, callback) 1 # Two argument callback. >>> callback = lambda item, index: index == 3 >>> pydash.find_index(users, callback) -1 # Three argument callback. >>> callback = lambda item, index, obj: obj[index]['active'] >>> pydash.find_index(users, callback) 2 Shallow Property Style ---------------------- The shallow property style callback is specified as a one item ``list`` containing the property value to return from an element. Internally, :func:`pydash.utilities.prop` is used to create the callback. .. doctest:: >>> users = [ ... {'name': 'Michelangelo', 'active': False}, ... {'name': 'Donatello', 'active': False}, ... {'name': 'Leonardo', 'active': True} ... ] >>> pydash.find_index(users, ['active']) 2 Deep Property Style ------------------- The deep property style callback is specified as a deep property ``string`` of the nested object value to return from an element. Internally, :func:`pydash.utilities.deep_prop` is used to create the callback. See :ref:`Deep Path Strings ` for more details. .. doctest:: >>> users = [ ... {'name': 'Michelangelo', 'location': {'city': 'Rome'}}, ... {'name': 'Donatello', 'location': {'city': 'Florence'}}, ... {'name': 'Leonardo', 'location': {'city': 'Amboise'}} ... ] >>> pydash.map_(users, 'location.city') ['Rome', 'Florence', 'Amboise'] Matches Property Style ---------------------- The matches property style callback is specified as a two item ``list`` containing a property key and value and returns ``True`` when an element's key is equal to value, else ``False``. Internally, :func:`pydash.utilities.matches_property` is used to create the callback. .. doctest:: >>> users = [ ... {'name': 'Michelangelo', 'active': False}, ... {'name': 'Donatello', 'active': False}, ... {'name': 'Leonardo', 'active': True} ... ] >>> pydash.find_index(users, ['active', False]) 0 >>> pydash.find_last_index(users, ['active', False]) 1 Matches Style ------------- The matches style callback is specified as a ``dict`` object and returns ``True`` when an element matches the properties of the object, else ``False``. Internally, :func:`pydash.utilities.matches` is used to create the callback. .. doctest:: >>> users = [ ... {'name': 'Michelangelo', 'location': {'city': 'Rome'}}, ... 
{'name': 'Donatello', 'location': {'city': 'Florence'}}, ... {'name': 'Leonardo', 'location': {'city': 'Amboise'}} ... ] >>> pydash.map_(users, {'location': {'city': 'Florence'}}) [False, True, False] pydash-8.0.3/docs/chaining.rst000066400000000000000000000055571464745015500163030ustar00rootroot00000000000000.. _method-chaining: Method Chaining *************** Method chaining in pydash is quite simple. An initial value is provided: .. code-block:: python from pydash import py_ py_([1, 2, 3, 4]) # Or through the chain() function import pydash pydash.chain([1, 2, 3, 4]) Methods are chained: .. code-block:: python py_([1, 2, 3, 4]).without(2, 3).reject(lambda x: x > 1) A final value is computed: .. code-block:: python result = py_([1, 2, 3, 4]).without(2, 3).reject(lambda x: x > 1).value() Lazy Evaluation =============== Method chaining is deferred (lazy) until ``.value()`` is called: .. doctest:: >>> from pydash import py_ >>> def echo(value): print(value) >>> lazy = py_([1, 2, 3, 4]).for_each(echo) # None of the methods have been called yet. >>> result = lazy.value() 1 2 3 4 # Each of the chained methods have now been called. >>> assert result == [1, 2, 3, 4] >>> result = lazy.value() 1 2 3 4 Committing a Chain ================== If one wishes to create a new chain object seeded with the computed value of another chain, then one can use the ``commit`` method: .. doctest:: >>> committed = lazy.commit() 1 2 3 4 >>> committed.value() [1, 2, 3, 4] >>> lazy.value() 1 2 3 4 [1, 2, 3, 4] Committing is equivalent to: .. code-block:: python committed = py_(lazy.value()) Late Value Passing ================== In :ref:`v3.0.0 ` the concept of late value passing was introduced to method chaining. This allows method chains to be re-used with different root values supplied. Essentially, ad-hoc functions can be created via the chaining syntax. .. doctest:: >>> square_sum = py_().power(2).sum() >>> assert square_sum([1, 2, 3]) == 14 >>> assert square_sum([4, 5, 6]) == 77 >>> square_sum_square = square_sum.power(2) >>> assert square_sum_square([1, 2, 3]) == 196 >>> assert square_sum_square([4, 5, 6]) == 5929 Planting a Value ================ To replace the initial value of a chain, use the ``plant`` method which will return a cloned chained using the new initial value: .. doctest:: >>> chained = py_([1, 2, 3, 4]).power(2).sum() >>> chained.value() 30 >>> rechained = chained.plant([5, 6, 7, 8]) >>> rechained.value() 174 >>> chained.value() 30 Module Access ============= Another feature of the ``py_`` object, is that it provides module access to ``pydash``: .. doctest:: >>> import pydash >>> from pydash import py_ >>> assert py_.add is pydash.add >>> py_.add(1, 2) == pydash.add(1, 2) True Through ``py_`` any function that ends with ``"_"`` can be accessed without the trailing ``"_"``: .. doctest:: >>> py_.filter([1, 2, 3], lambda x: x > 1) == pydash.filter_([1, 2, 3], lambda x: x > 1) True pydash-8.0.3/docs/changelog.rst000066400000000000000000000000361464745015500164350ustar00rootroot00000000000000.. include:: ../CHANGELOG.rst pydash-8.0.3/docs/conf.py000066400000000000000000000117741464745015500152660ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. 
For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -- Project information ----------------------------------------------------- import importlib.metadata pkg_info = importlib.metadata.metadata("pydash") project = pkg_info["Name"] author = pkg_info["Author-email"] description = pkg_info["Summary"] copyright = "2013, " + author # The short X.Y version version = pkg_info["Version"] # The full version, including alpha/beta/rc tags release = version # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.coverage", "sphinx.ext.viewcode", "sphinx.ext.napoleon", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_parsers = {} source_suffix = [".rst"] # The master toctree document. master_doc = "index" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "furo" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. 
htmlhelp_basename = project + "doc" # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # #'preamble': '', # Latex figure (float) alignment # #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, project + ".tex", project + " Documentation", author, "manual"), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, project, project + " Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, project, project + " Documentation", author, project, description, "Miscellaneous", ), ] # -- Extension configuration ------------------------------------------------- pydash-8.0.3/docs/contributing.rst000066400000000000000000000000411464745015500172110ustar00rootroot00000000000000.. include:: ../CONTRIBUTING.rst pydash-8.0.3/docs/deeppath.rst000066400000000000000000000013701464745015500163020ustar00rootroot00000000000000.. _deeppath: Deep Path Strings ================= .. testsetup:: import pydash A deep path string is used to access a nested data structure of arbitrary length. Each level is separated by a ``"."`` and can be used on both dictionaries and lists. If a ``"."`` is contained in one of the dictionary keys, then it can be escaped using ``"\"``. For accessing a dictionary key that is a number, it can be wrapped in brackets like ``"[1]"``. Examples: .. doctest:: >>> data = {'a': {'b': {'c': [0, 0, {'d': [0, {1: 2}]}]}}} >>> pydash.get(data, 'a.b.c.2.d.1.[1]') 2 >>> data = {'a': {'b.c.d': 2}} >>> pydash.get(data, r'a.b\.c\.d') 2 Pydash's callback system supports the deep property style callback using deep path strings. pydash-8.0.3/docs/devguide.rst000066400000000000000000000000351464745015500163010ustar00rootroot00000000000000.. include:: ../DEVGUIDE.rst pydash-8.0.3/docs/differences.rst000066400000000000000000000037751464745015500170000ustar00rootroot00000000000000.. _differences: Lodash Differences =================== Naming Conventions ------------------ pydash adheres to the following conventions: - Function names use ``snake_case`` instead of ``camelCase``. - Any Lodash function that shares its name with a reserved Python keyword will have an ``_`` appended after it (e.g. ``filter`` in Lodash would be ``filter_`` in pydash). - Lodash's ``toArray()`` is pydash's ``to_list()``. - Lodash's ``functions()`` is pydash's ``callables()``. This particular name difference was chosen in order to allow for the ``functions.py`` module file to exist at root of the project. Previously, ``functions.py`` existed in ``pydash/api/`` but in ``v2.0.0``, it was decided to move everything in ``api/`` to ``pydash/``. Therefore, to avoid import ambiguities, the ``functions()`` function was renamed. - Lodash's ``is_native()`` is pydash's ``is_builtin()``. This aligns better with Python's builtins terminology. 
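For example, the reserved-name and renaming conventions above look like this in practice (an illustrative snippet; expected results are shown as comments):

.. code-block:: python

    import pydash

    # ``filter`` would shadow the Python builtin, so the pydash port is ``filter_``.
    pydash.filter_([1, 2, 3, 4], lambda x: x > 2)  # -> [3, 4]

    # Lodash's ``toArray()`` is ``to_list()``.
    pydash.to_list((1, 2, 3))  # -> [1, 2, 3]

    # Lodash's ``isNative()`` is ``is_builtin()``.
    pydash.is_builtin(len)  # -> True
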
Callbacks --------- There are a few differences in the extra callback styles that are supported: - Pydash has an explicit shallow property access of the form ``['some_property']``, as in ``pydash.map_([{'a.b': 1, 'a': {'b': 3}}, {'a.b': 2, 'a': {'b': 4}}], ['a.b'])``, which would evaluate to ``[1, 2]`` and not ``[3, 4]`` (as would be the case for ``'a.b'``). Extra Functions --------------- In addition to porting Lodash, pydash contains functions found in lodashcontrib_, lodashdeep_, lodashmath_, and underscorestring_. Function Behavior ----------------- Some of pydash's functions behave differently: - :func:`pydash.utilities.memoize` uses all passed in arguments as the cache key by default instead of only using the first argument. Templating ---------- - pydash doesn't have ``template()``. See :ref:`Templating ` for more details. .. _lodashcontrib: https://github.com/node4good/lodash-contrib .. _lodashdeep: https://github.com/marklagendijk/lodash-deep .. _lodashmath: https://github.com/Delapouite/lodash.math .. _underscorestring: https://github.com/epeli/underscore.string pydash-8.0.3/docs/index.rst000066400000000000000000000011351464745015500156160ustar00rootroot00000000000000.. pydash documentation master file .. include:: ../README.rst .. include:: quickstart.rst Guide ===== .. toctree:: :maxdepth: 2 installation quickstart differences callbacks deeppath chaining templating upgrading devguide API Reference ============= Includes links to source code. .. toctree:: :maxdepth: 2 api Project Info ============ .. toctree:: :maxdepth: 1 license versioning changelog authors contributing kudos Indices and Tables ================== - :ref:`genindex` - :ref:`modindex` - :ref:`search` pydash-8.0.3/docs/installation.rst000066400000000000000000000002731464745015500172120ustar00rootroot00000000000000Installation ============ **pydash** requires Python >= 3.8. It has no external dependencies. To install from `PyPi `_: :: pip install pydash pydash-8.0.3/docs/kudos.rst000066400000000000000000000001441464745015500156330ustar00rootroot00000000000000Kudos ***** Thank you to `Lodash `_ for providing such a great library to port.pydash-8.0.3/docs/license.rst000066400000000000000000000000551464745015500161310ustar00rootroot00000000000000License ======= .. include:: ../LICENSE.rst pydash-8.0.3/docs/quickstart.rst000066400000000000000000000027331464745015500167060ustar00rootroot00000000000000Quickstart ========== The functions available from pydash can be used in two styles. The first is by using the module directly or importing from it: .. doctest:: >>> import pydash # Arrays >>> pydash.flatten([1, 2, [3, [4, 5, [6, 7]]]]) [1, 2, 3, [4, 5, [6, 7]]] >>> pydash.flatten_deep([1, 2, [3, [4, 5, [6, 7]]]]) [1, 2, 3, 4, 5, 6, 7] # Collections >>> pydash.map_([{'name': 'moe', 'age': 40}, {'name': 'larry', 'age': 50}], 'name') ['moe', 'larry'] # Functions >>> curried = pydash.curry(lambda a, b, c: a + b + c) >>> curried(1, 2)(3) 6 # Objects >>> pydash.omit({'name': 'moe', 'age': 40}, 'age') {'name': 'moe'} # Utilities >>> pydash.times(3, lambda index: index) [0, 1, 2] # Chaining >>> pydash.chain([1, 2, 3, 4]).without(2, 3).reject(lambda x: x > 1).value() [1] The second style is to use the ``py_`` or ``_`` instances (they are the same object as two different aliases): .. doctest:: >>> from pydash import py_ # Method calling which is equivalent to pydash.flatten(...) >>> py_.flatten([1, 2, [3, [4, 5, [6, 7]]]]) [1, 2, 3, [4, 5, [6, 7]]] # Method chaining which is equivalent to pydash.chain(...)
>>> py_([1, 2, 3, 4]).without(2, 3).reject(lambda x: x > 1).value() [1] # Late method chaining >>> py_().without(2, 3).reject(lambda x: x > 1)([1, 2, 3, 4]) [1] .. seealso:: For further details consult :ref:`API Reference `. pydash-8.0.3/docs/templating.rst000066400000000000000000000010621464745015500166520ustar00rootroot00000000000000.. _templating: Templating ========== Templating has been purposely left out of pydash. Having a custom templating engine was never a goal of pydash even though Lodash includes one. There already exist many mature and battle-tested templating engines like `Jinja2 `_ and `Mako `_ which are better suited to handling templating needs. However, if there were ever a strong request/justification for having templating in pydash (or a pull-request implementing it), then this decision could be re-evaluated. pydash-8.0.3/docs/upgrading.rst000066400000000000000000000244521464745015500164750ustar00rootroot00000000000000.. _upgrading: Upgrading ********* From v3.x.x to v4.0.0 ===================== Start by reading the full list of changes in ``v4.0.0`` at the :ref:`Changelog `. There are a significant number of backwards-incompatibilities that will likely need to be addressed: - All function aliases have been removed in favor of having a single named function for everything. This was done to make things less confusing by having only a single named function that performs an action vs. potentially using two different names for the same function. - A few functions have been removed whose functionality was duplicated by another function. - Some functions have been renamed for consistency and to align with Lodash. - Many functions have had their callback argument moved to another function to align with Lodash. - The generic ``callback`` argument has been renamed to either ``iteratee``, ``predicate``, or ``comparator``. This was done to make it clearer what the callback is doing and to align more with Lodash's naming conventions (see the example below).
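In practice the renamed callback arguments show up as either positional or keyword arguments on the affected functions. The following sketch is based on the current signatures of ``difference_by``, ``difference_with``, and ``drop_while`` (expected results shown as comments):

.. code-block:: python

    import pydash

    # ``iteratee`` transforms elements before they are compared.
    pydash.difference_by([1.2, 1.5, 1.7, 2.8], [0.9, 3.2], iteratee=round)  # -> [1.5, 1.7]

    # ``comparator`` compares elements of the arrays directly.
    pydash.difference_with(
        ["apple", "banana", "pear"],
        ["avocado", "pumpkin"],
        ["peach"],
        comparator=lambda a, b: a[0] == b[0],
    )  # -> ['banana']

    # ``predicate`` decides whether an element is dropped.
    pydash.drop_while([1, 2, 3, 4], lambda x: x < 3)  # -> [3, 4]
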
Once the shock of those backwards-incompatibilities has worn off, discover 72 new functions: - 19 new array methods - :func:`pydash.arrays.difference_by` - :func:`pydash.arrays.difference_with` - :func:`pydash.arrays.from_pairs` - :func:`pydash.arrays.intersection_by` - :func:`pydash.arrays.intersection_with` - :func:`pydash.arrays.nth` - :func:`pydash.arrays.pull_all` - :func:`pydash.arrays.sorted_index_by` - :func:`pydash.arrays.sorted_index_of` - :func:`pydash.arrays.sorted_last_index_by` - :func:`pydash.arrays.sorted_last_index_of` - :func:`pydash.arrays.sorted_uniq` - :func:`pydash.arrays.union_by` - :func:`pydash.arrays.union_with` - :func:`pydash.arrays.uniq_by` - :func:`pydash.arrays.uniq_with` - :func:`pydash.arrays.xor_by` - :func:`pydash.arrays.xor_with` - :func:`pydash.arrays.zip_object_deep` - 6 new collection methods - :func:`pydash.collections.flat_map` - :func:`pydash.collections.flat_map_deep` - :func:`pydash.collections.flat_map_depth` - :func:`pydash.collections.flatten_depth` - :func:`pydash.collections.invoke_map` - :func:`pydash.collections.sample_size` - 2 new function methods - :func:`pydash.functions.flip` - :func:`pydash.functions.unary` - 12 new object methods - :func:`pydash.objects.assign_with` - :func:`pydash.objects.clone_deep_with` - :func:`pydash.objects.clone_with` - :func:`pydash.objects.invert_by` - :func:`pydash.objects.merge_with` - :func:`pydash.objects.omit_by` - :func:`pydash.objects.pick_by` - :func:`pydash.objects.set_with` - :func:`pydash.objects.to_integer` - :func:`pydash.objects.unset` - :func:`pydash.objects.update` - :func:`pydash.objects.update_with` - 8 new numerical methods - :func:`pydash.numerical.clamp` - :func:`pydash.numerical.divide` - :func:`pydash.numerical.max_by` - :func:`pydash.numerical.mean_by` - :func:`pydash.numerical.min_by` - :func:`pydash.numerical.multiply` - :func:`pydash.numerical.subtract` - :func:`pydash.numerical.sum_by` - 4 new predicate methods - :func:`pydash.predicates.eq` - :func:`pydash.predicates.is_equal_with` - :func:`pydash.predicates.is_match_with` - :func:`pydash.predicates.is_set` - 6 new string methods - :func:`pydash.strings.lower_case` - :func:`pydash.strings.lower_first` - :func:`pydash.strings.to_lower` - :func:`pydash.strings.to_upper` - :func:`pydash.strings.upper_case` - :func:`pydash.strings.upper_first` - 15 new utility methods - :func:`pydash.utilities.cond` - :func:`pydash.utilities.conforms` - :func:`pydash.utilities.conforms_to` - :func:`pydash.utilities.default_to` - :func:`pydash.utilities.nth_arg` - :func:`pydash.utilities.over` - :func:`pydash.utilities.over_every` - :func:`pydash.utilities.over_some` - :func:`pydash.utilities.range_right` - :func:`pydash.utilities.stub_list` - :func:`pydash.utilities.stub_dict` - :func:`pydash.utilities.stub_false` - :func:`pydash.utilities.stub_string` - :func:`pydash.utilities.stub_true` - :func:`pydash.utilities.to_path` From v2.x.x to v3.0.0 ===================== There were several breaking changes in ``v3.0.0``: - Make ``to_string`` convert ``None`` to empty string.
(**breaking change**) - Make the following functions work with empty strings and ``None``: (**breaking change**) - ``camel_case`` - ``capitalize`` - ``chars`` - ``chop`` - ``chop_right`` - ``class_case`` - ``clean`` - ``count_substr`` - ``decapitalize`` - ``ends_with`` - ``join`` - ``js_replace`` - ``kebab_case`` - ``lines`` - ``quote`` - ``re_replace`` - ``replace`` - ``series_phrase`` - ``series_phrase_serial`` - ``starts_with`` - ``surround`` - Reorder function arguments for ``after`` from ``(n, func)`` to ``(func, n)``. (**breaking change**) - Reorder function arguments for ``before`` from ``(n, func)`` to ``(func, n)``. (**breaking change**) - Reorder function arguments for ``times`` from ``(n, callback)`` to ``(callback, n)``. (**breaking change**) - Reorder function arguments for ``js_match`` from ``(reg_exp, text)`` to ``(text, reg_exp)``. (**breaking change**) - Reorder function arguments for ``js_replace`` from ``(reg_exp, text, repl)`` to ``(text, reg_exp, repl)``. (**breaking change**) And some potential breaking changes: - Move ``arrays.join`` to ``strings.join`` (**possible breaking change**). - Rename ``join``/``implode``'s second parameter from ``delimiter`` to ``separator``. (**possible breaking change**) - Rename ``split``/``explode``'s second parameter from ``delimiter`` to ``separator``. (**possible breaking change**) Some notable new features/functions: - 31 new string methods - :func:`pydash.strings.chars` - :func:`pydash.strings.chop` - :func:`pydash.strings.chop_right` - :func:`pydash.strings.class_case` - :func:`pydash.strings.clean` - :func:`pydash.strings.count_substr` - :func:`pydash.strings.decapitalize` - :func:`pydash.strings.has_substr` - :func:`pydash.strings.human_case` - :func:`pydash.strings.insert_substr` - :func:`pydash.strings.lines` - :func:`pydash.strings.number_format` - :func:`pydash.strings.pascal_case` - :func:`pydash.strings.predecessor` - :func:`pydash.strings.prune` - :func:`pydash.strings.re_replace` - :func:`pydash.strings.replace` - :func:`pydash.strings.separator_case` - :func:`pydash.strings.series_phrase` - :func:`pydash.strings.series_phrase_serial` - :func:`pydash.strings.slugify` - :func:`pydash.strings.split` - :func:`pydash.strings.strip_tags` - :func:`pydash.strings.substr_left` - :func:`pydash.strings.substr_left_end` - :func:`pydash.strings.substr_right` - :func:`pydash.strings.substr_right_end` - :func:`pydash.strings.successor` - :func:`pydash.strings.swap_case` - :func:`pydash.strings.title_case` - :func:`pydash.strings.unquote` - 1 new array method - :func:`pydash.arrays.duplicates` - 2 new function methods - :func:`pydash.functions.ary` - :func:`pydash.functions.rearg` - 1 new collection method: - :func:`pydash.collections.sort_by_all` - 4 new object methods - :func:`pydash.objects.to_boolean` - :func:`pydash.objects.to_dict` - :func:`pydash.objects.to_number` - :func:`pydash.objects.to_plain_object` - 4 new predicate methods - :func:`pydash.predicates.is_blank` - :func:`pydash.predicates.is_builtin` and alias :func:`pydash.predicates.is_native` - :func:`pydash.predicates.is_match` - :func:`pydash.predicates.is_tuple` - 1 new utility method - :func:`pydash.utilities.prop_of` and alias :func:`pydash.utilities.property_of` - 6 new aliases: - :func:`pydash.predicates.is_bool` for :func:`pydash.predicates.is_boolean` - :func:`pydash.predicates.is_dict` for :func:`pydash.predicates.is_plain_object` - :func:`pydash.predicates.is_int` for :func:`pydash.predicates.is_integer` - :func:`pydash.predicates.is_num` for 
:func:`pydash.predicates.is_number` - :func:`pydash.strings.truncate` for :func:`pydash.strings.trunc` - :func:`pydash.strings.underscore_case` for :func:`pydash.strings.snake_case` - Chaining can now accept the root ``value`` argument late. - Chains can be re-used with different initial values via ``chain().plant``. - New chains can be created using the chain's computed value as the new chain's initial value via ``chain().commit``. - Support iteration over class instance properties for non-list, non-dict, and non-iterable objects. Late Value Chaining ------------------- The passing of the root ``value`` argument for chaining can now be done "late", meaning that you can build chains without providing a value at the beginning. This allows you to build a chain and re-use it with different root values: .. doctest:: >>> from pydash import py_ >>> square_sum = py_().power(2).sum() >>> [square_sum([1, 2, 3]), square_sum([4, 5, 6]), square_sum([7, 8, 9])] [14, 77, 194] .. seealso:: - For more details on method chaining, check out :ref:`Method Chaining `. - For a full listing of changes in ``v3.0.0``, check out the :ref:`Changelog `. From v1.x.x to v2.0.0 ===================== There were several breaking and potentially breaking changes in ``v2.0.0``: - :func:`pydash.arrays.flatten` is now shallow by default. Previously, it was deep by default. For deep flattening, use either ``flatten(..., is_deep=True)`` or ``flatten_deep(...)``. - :func:`pydash.predicates.is_number` now returns ``False`` for boolean ``True`` and ``False``. Previously, it returned ``True``. - Internally, the files located in ``pydash.api`` were moved to ``pydash``. If you imported from ``pydash.api.``, then it's recommended to change your imports to pull from ``pydash``. - The function ``functions()`` was renamed to ``callables()`` to avoid ambiguities with the module ``functions.py``. Some notable new features: - Callback functions no longer require the full call signature definition. - A new "_" instance was added which supports both method chaining and module method calling. See :ref:`api-dash-instance` for more details. .. seealso:: For a full listing of changes in ``v2.0.0``, check out the :ref:`Changelog `. pydash-8.0.3/docs/versioning.rst000066400000000000000000000007611464745015500166760ustar00rootroot00000000000000Versioning ========== This project follows `Semantic Versioning`_ with the following caveats: - Only the public API (i.e. the objects imported into the ``pydash`` module) will maintain backwards compatibility between MINOR version bumps. - Objects within any other part of the library are not guaranteed to remain backwards compatible between MINOR version bumps. With that in mind, it is recommended to only use or import objects from the main module, ``pydash``. .. _Semantic Versioning: http://semver.org/ pydash-8.0.3/pyproject.toml000066400000000000000000000075401464745015500157470ustar00rootroot00000000000000[build-system] requires = ["setuptools>=61.0", "wheel"] build-backend = "setuptools.build_meta" [project] name = "pydash" dynamic = ["version"] authors = [{ name = "Derrick Gilland", email = "dgilland@gmail.com" }] description = 'The kitchen sink of Python utility libraries for doing "stuff" in a functional way. Based on the Lo-Dash Javascript library.'
readme = "README.rst" license = { file = "LICENSE.rst" } keywords = ["pydash", "utility", "functional", "lodash", "underscore"] classifiers = [ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: MIT License", "Intended Audience :: Developers", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Topic :: Software Development :: Libraries", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Utilities", ] requires-python = ">=3.8" dependencies = ["typing-extensions>3.10,!=4.6.0"] [project.urls] Homepage = "https://github.com/dgilland/pydash" Documentation = "https://pydash.readthedocs.org" Repository = "https://github.com/dgilland/pydash" Issues = "https://github.com/dgilland/pydash/issues" Changelog = "https://github.com/dgilland/pydash/blob/develop/CHANGELOG.rst" [project.optional-dependencies] dev = [ "build", "coverage", "ruff", "furo", "invoke", "mypy", "pytest", "pytest-mypy-testing", "pytest-cov", "sphinx", "tox", "twine", "wheel", "sphinx-autodoc-typehints", ] [tool.setuptools.dynamic] version = { attr = "pydash.__version__" } [tool.distutils.bdist_wheel] python-tag = "py3" [tool.ruff] src = ["src"] extend-exclude = [".cache", "tests/pytest_mypy_testing"] extend-include = ["*.pyi"] line-length = 100 # target the lowest supported version to avoid introducing unsupported syntax target-version = "py38" [tool.ruff.lint] select = [ # flake8 "F", "E", "W", # flake8-bugbear "B", # pylint "PL", # isort "I", ] # F811 - redefinition of unused `name` from line `N` # E203 - whitespace before ':' # E701 - multiple statements on one line (colon) # PLR2004 - Magic value used in comparison, consider replacing `...` with a constant variable # PLW2901 - `...` loop variable `...` overwritten by assignment target # PLR0913 - Too many arguments in function definition ignore = ["F811", "E203", "E701", "PLR2004", "PLW2901", "PLR0913"] [tool.ruff.lint.extend-per-file-ignores] # Exceptions for the type stub # F403 - `from module import *` used; unable to detect undefined names # F405 - name may be undefined, or defined from star imports # E501 - line too long "*.pyi" = ["F403", "F405", "E501"] [tool.ruff.lint.isort] lines-after-imports = 2 combine-as-imports = true force-sort-within-sections = true [tool.ruff.format] docstring-code-format = true [tool.mypy] mypy_path = ["src"] python_version = "3.8" exclude = [ "tests/pytest_mypy_testing", ] show_column_numbers = true show_error_context = false ignore_missing_imports = true warn_return_any = false strict_optional = true warn_no_return = true warn_redundant_casts = false warn_unused_ignores = false disallow_any_generics = true [tool.pytest.ini_options] addopts = [ "--verbose", "--doctest-modules", "--no-cov-on-fail", "--cov-fail-under=100", "--cov-report=term-missing", "--cov-report=xml:build/coverage/coverage.xml", "--cov-report=html:build/coverage", "--junitxml=build/testresults/junit.xml", ] [tool.coverage.run] omit = [ "*/tests/*", "*/test_*", "*/_compat.py", "*/types.py", ] [tool.coverage.report] exclude_lines = [ "pragma: no cover", "@t.overload" ] pydash-8.0.3/requirements.txt000066400000000000000000000000121464745015500163020ustar00rootroot00000000000000-e .[dev] 
pydash-8.0.3/scripts/000077500000000000000000000000001464745015500145145ustar00rootroot00000000000000pydash-8.0.3/scripts/chaining_type_generator.py000066400000000000000000000212351464745015500217600ustar00rootroot00000000000000import argparse import ast from collections import defaultdict, deque from pathlib import Path import typing as t WRAPPER_KW = "RES" INIT_FILE = "src/pydash/__init__.py" BASE_MODULE = ''' # mypy: disable-error-code=misc """Generated from the `scripts/chaining_type_generator.py` script.""" import re import typing as t from typing_extensions import Concatenate, Literal, ParamSpec, Type import pydash as pyd from pydash.chaining.chaining import Chain from pydash.types import * from pydash.helpers import Unset, UNSET from pydash.functions import ( After, Ary, Before, CurryOne, CurryTwo, CurryThree, CurryFour, CurryFive, CurryRightOne, CurryRightTwo, CurryRightThree, CurryRightFour, CurryRightFive, Debounce, Disjoin, Flow, Iterated, Juxtapose, Negate, Once, Partial, Rearg, Spread, Throttle, ) from pydash.utilities import MemoizedFunc from _typeshed import ( SupportsDunderGE, SupportsDunderGT, SupportsDunderLE, SupportsDunderLT, SupportsRichComparison, SupportsAdd, SupportsRichComparisonT, SupportsSub, ) ValueT_co = t.TypeVar("ValueT_co", covariant=True) T = t.TypeVar("T") T1 = t.TypeVar("T1") T2 = t.TypeVar("T2") T3 = t.TypeVar("T3") T4 = t.TypeVar("T4") T5 = t.TypeVar("T5") NumT = t.TypeVar("NumT", int, float, "Decimal") NumT2 = t.TypeVar("NumT2", int, float, "Decimal") NumT3 = t.TypeVar("NumT3", int, float, "Decimal") CallableT = t.TypeVar("CallableT", bound=t.Callable[..., t.Any]) SequenceT = t.TypeVar("SequenceT", bound=t.Sequence[t.Any]) MutableSequenceT = t.TypeVar("MutableSequenceT", bound=t.MutableSequence[t.Any]) P = ParamSpec("P") class {class_name}: ''' FUNCTIONS_TO_SKIP = [ # this is already a method of `Chain` "to_string", ] def build_header(class_name: str) -> str: return BASE_MODULE.format(class_name=class_name) def modules_and_api_funcs() -> t.Dict[str, t.List[str]]: """This is mostly so we don't have to import `pydash`""" with open(INIT_FILE, "r", encoding="utf-8") as source: tree = ast.parse(source.read()) module_to_funcs = defaultdict(list) for node in ast.walk(tree): # TODO: maybe handle `Import` as well, not necessary for now if isinstance(node, ast.ImportFrom): for name in node.names: module_to_funcs[node.module].append(name.asname or name.name) return module_to_funcs def is_overload(node: ast.FunctionDef) -> bool: return any( ( (isinstance(decorator, ast.Name) and decorator.id == "overload") or (isinstance(decorator, ast.Attribute) and decorator.attr == "overload") ) for decorator in node.decorator_list ) def returns_typeguard(node: ast.FunctionDef) -> bool: def is_constant_typeguard(cst: ast.expr) -> bool: return isinstance(cst, ast.Constant) and cst.value is not None and "TypeGuard" in cst.value def is_subscript_typeguard(sub: ast.expr) -> bool: return ( isinstance(sub, ast.Subscript) and isinstance(sub.value, ast.Name) and "TypeGuard" in sub.value.id ) return node.returns is not None and ( is_constant_typeguard(node.returns) or is_subscript_typeguard(node.returns) ) def has_single_default_arg(node: ast.FunctionDef) -> bool: return len(node.args.args) == 1 and len(node.args.defaults) >= 1 def chainwrapper_args( node: ast.FunctionDef, ) -> t.Tuple[t.List[ast.expr], t.List[ast.keyword]]: # TODO: handle posonlyargs args: t.List[ast.expr] = [ast.Name(id=arg.arg) for arg in node.args.args[1:]] kwargs: t.List[ast.keyword] = [ ast.keyword(arg=kw.arg, 
value=ast.Name(id=kw.arg)) for kw in node.args.kwonlyargs ] if node.args.vararg: args.append(ast.Starred(value=ast.Name(id=node.args.vararg.arg))) if node.args.kwarg: kwargs.append(ast.keyword(value=ast.Name(id=node.args.kwarg.arg))) return args, kwargs def wrap_type(wrapper: ast.Subscript, to_wrap: ast.expr) -> ast.expr: if isinstance(wrapper.slice, ast.Tuple): slice = ast.Tuple( elts=[ s if not (isinstance(s, ast.Name) and s.id == WRAPPER_KW) else to_wrap for s in wrapper.slice.elts ] ) else: slice = to_wrap return ast.Subscript( value=wrapper.value, slice=slice, ) def get_first_arg(node: ast.FunctionDef) -> ast.arg: if node.args.args: return node.args.args[0] if node.args.vararg: return ast.arg(arg=node.args.vararg.arg, annotation=node.args.vararg.annotation) raise RuntimeError("Node should have a first argument") def transform_function(node: ast.FunctionDef, wrapper: ast.Subscript) -> ast.FunctionDef: first_arg = get_first_arg(node) cw_args, cw_kwargs = chainwrapper_args(node) # case where we only have a vararg argument if not node.args.args: node.args.args.append(first_arg) if first_arg.annotation: first_arg.annotation = ast.Constant( value=ast.unparse(wrap_type(wrapper, first_arg.annotation)) ) first_arg.arg = "self" # we need to remove the first default arg as it is now the self argument if len(node.args.args) == len(node.args.defaults): node.args.defaults = node.args.defaults[1:] if node.returns: # TODO: `(some_arg: T) -> TypeGuard[T]` to `(some_arg: Any) -> bool` # TODO: otherwise we would get a `T` alone # change typeguard to bool as it is useless in a chain if returns_typeguard(node): node.returns = ast.Name(id="bool") node.returns = ast.Constant(value=ast.unparse(wrap_type(wrapper, node.returns))) if not is_overload(node): node.body = [ ast.Return( value=ast.Call( func=ast.Call( func=ast.Name(id="self._wrap"), args=[ast.Name(id=f"pyd.{node.name}")], keywords=[], ), args=cw_args, keywords=cw_kwargs, ) ) ] return node def filename_from_module(module: str) -> str: return "src/pydash/chaining/chaining.py" if module == "chaining" else f"src/pydash/{module}.py" def main() -> int: parser = argparse.ArgumentParser() parser.add_argument( "--class_name", help="Name of the output class to put typed methods in", required=True, ) parser.add_argument( "--output", type=Path, help="Path to the file to write the typed class to (probably a `.pyi` file)", required=True, ) parser.add_argument( "--wrapper", help="The main generic class (eg. 
`Chain`)", required=True, ) args = parser.parse_args() wrapper = args.wrapper + f"[{WRAPPER_KW}]" wrapper = ast.parse(wrapper).body[0] assert isinstance(wrapper, ast.Expr), "`wrapper` value should contain one expression" wrapper = wrapper.value assert isinstance( wrapper, ast.Subscript ), "`wrapper` value should contain one with one subscript" to_file = open(args.output, "w") to_file.write(build_header(args.class_name)) module_to_funcs = modules_and_api_funcs() for module in module_to_funcs.keys(): filename = filename_from_module(module) with open(filename, encoding="utf-8") as source: tree = ast.parse(source.read(), filename=filename) class_methods = deque() for node in ast.walk(tree): if isinstance(node, ast.ClassDef): class_methods.extend(f for f in node.body if isinstance(f, ast.FunctionDef)) # skipping class methods if node in class_methods: class_methods.popleft() continue if ( isinstance(node, ast.FunctionDef) and node.name in module_to_funcs[module] and (node.args.args or node.args.vararg) # skipping funcs without args for now and not has_single_default_arg(node) # skipping 1 default arg funcs and node.name not in FUNCTIONS_TO_SKIP ): new_node = transform_function(node, wrapper) to_file.write(" " * 4) to_file.write(ast.unparse(new_node).replace("\n", f"\n{' ' * 4}")) to_file.write("\n\n") if new_node.name.endswith("_") and not is_overload(new_node): to_file.write(f"{' ' * 4}{new_node.name.rstrip('_')} = {new_node.name}") to_file.write("\n\n") to_file.close() return 0 if __name__ == "__main__": raise SystemExit(main()) pydash-8.0.3/scripts/mypy_doctests_generator.py000066400000000000000000000040551464745015500220460ustar00rootroot00000000000000from pathlib import Path import re import sys import typing as t class DocString(t.NamedTuple): content: str function_name: str def example_block(self) -> t.Union[t.Iterable[str], None]: expression_re = re.compile(r"Examples?:\n+((?:\s{8}.+?\n)+)") match = expression_re.search(self.content) if not match: return None example = match.group(1) return (line.strip() for line in example.strip().splitlines()) def docstrings(path: Path) -> t.Iterable[DocString]: docstring_re = re.compile(r"def (.+?)\((?:.|\n)+?:[\s\n]+?r?\"\"\"((?:.|\s|\n)+?)\"\"\"") with open(path) as f: text = f.read() docstrings = docstring_re.finditer(text) return map( lambda match: DocString(function_name=match.group(1), content=match.group(2)), docstrings ) def generate_test_function(docstring: DocString) -> str: if not (example_block := docstring.example_block()): return "" built_function = "" built_function += "@pytest.mark.mypy_testing\n" built_function += f"def test_mypy_{docstring.function_name}() -> None:\n" for line in map(lambda line_: line_.strip(), example_block): if not line: continue to_be_revealed = line.replace(">>> ", "") if line.startswith(">>> "): to_be_revealed = f"_.{to_be_revealed}" built_function += f" reveal_type({to_be_revealed}) # R:\n" return built_function def main(path: Path) -> str: imports = "import pytest\n\n" imports += "import pydash as _\n\n\n" return imports + "\n\n".join(map(generate_test_function, docstrings(path))) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("filename", help="path to python file", type=Path) parser.add_argument("output", help="path to output file", type=Path) args = parser.parse_args() if not args.filename.exists(): print(f"`{args.filename}` does not exist") sys.exit(1) args.output.write_text(main(args.filename)) 
pydash-8.0.3/src/000077500000000000000000000000001464745015500136145ustar00rootroot00000000000000pydash-8.0.3/src/pydash/000077500000000000000000000000001464745015500151045ustar00rootroot00000000000000pydash-8.0.3/src/pydash/__init__.py000066400000000000000000000266721464745015500172320ustar00rootroot00000000000000"""Python port of Lo-Dash.""" __version__ = "8.0.3" from .arrays import ( chunk, compact, concat, difference, difference_by, difference_with, drop, drop_right, drop_right_while, drop_while, duplicates, fill, find_index, find_last_index, flatten, flatten_deep, flatten_depth, from_pairs, head, index_of, initial, intercalate, interleave, intersection, intersection_by, intersection_with, intersperse, last, last_index_of, mapcat, nth, pop, pull, pull_all, pull_all_by, pull_all_with, pull_at, push, remove, reverse, shift, slice_, sort, sorted_index, sorted_index_by, sorted_index_of, sorted_last_index, sorted_last_index_by, sorted_last_index_of, sorted_uniq, sorted_uniq_by, splice, split_at, tail, take, take_right, take_right_while, take_while, union, union_by, union_with, uniq, uniq_by, uniq_with, unshift, unzip, unzip_with, without, xor, xor_by, xor_with, zip_, zip_object, zip_object_deep, zip_with, ) from .chaining import _Dash, chain, tap from .collections import ( at, count_by, every, filter_, find, find_last, flat_map, flat_map_deep, flat_map_depth, for_each, for_each_right, group_by, includes, invoke_map, key_by, map_, nest, order_by, partition, pluck, reduce_, reduce_right, reductions, reductions_right, reject, sample, sample_size, shuffle, size, some, sort_by, ) from .exceptions import InvalidMethod from .functions import ( after, ary, before, conjoin, curry, curry_right, debounce, delay, disjoin, flip, flow, flow_right, iterated, juxtapose, negate, once, over_args, partial, partial_right, rearg, spread, throttle, unary, wrap, ) from .numerical import ( add, ceil, clamp, divide, floor, max_, max_by, mean, mean_by, median, min_, min_by, moving_mean, multiply, power, round_, scale, slope, std_deviation, subtract, sum_, sum_by, transpose, variance, zscore, ) from .objects import ( apply, apply_catch, apply_if, apply_if_not_none, assign, assign_with, callables, clone, clone_deep, clone_deep_with, clone_with, defaults, defaults_deep, find_key, find_last_key, for_in, for_in_right, get, has, invert, invert_by, invoke, keys, map_keys, map_values, map_values_deep, merge, merge_with, omit, omit_by, parse_int, pick, pick_by, rename_keys, set_, set_with, to_boolean, to_dict, to_integer, to_list, to_number, to_pairs, to_string, transform, unset, update, update_with, values, ) from .predicates import ( eq, eq_cmp, gt, gt_cmp, gte, gte_cmp, in_range, in_range_cmp, is_associative, is_blank, is_boolean, is_builtin, is_date, is_decreasing, is_dict, is_empty, is_equal, is_equal_cmp, is_equal_with, is_equal_with_cmp, is_error, is_even, is_float, is_function, is_increasing, is_indexed, is_instance_of, is_instance_of_cmp, is_integer, is_iterable, is_json, is_list, is_match, is_match_cmp, is_match_with, is_match_with_cmp, is_monotone, is_monotone_cmp, is_nan, is_negative, is_none, is_number, is_object, is_odd, is_positive, is_reg_exp, is_set, is_strictly_decreasing, is_strictly_increasing, is_string, is_tuple, is_zero, lt, lt_cmp, lte, lte_cmp, ) from .strings import ( camel_case, capitalize, chars, chop, chop_right, clean, count_substr, deburr, decapitalize, ends_with, ensure_ends_with, ensure_starts_with, escape, escape_reg_exp, has_substr, human_case, insert_substr, join, kebab_case, lines, 
lower_case, lower_first, number_format, pad, pad_end, pad_start, pascal_case, predecessor, prune, quote, reg_exp_js_match, reg_exp_js_replace, reg_exp_replace, repeat, replace, replace_end, replace_start, separator_case, series_phrase, series_phrase_serial, slugify, snake_case, split, start_case, starts_with, strip_tags, substr_left, substr_left_end, substr_right, substr_right_end, successor, surround, swap_case, title_case, to_lower, to_upper, trim, trim_end, trim_start, truncate, unescape, unquote, upper_case, upper_first, url, words, ) from .utilities import ( attempt, cond, conforms, conforms_to, constant, default_to, default_to_any, identity, iteratee, matches, matches_property, memoize, method, method_of, noop, now, nth_arg, over, over_every, over_some, properties, property_, property_of, random, range_, range_right, result, retry, stub_dict, stub_false, stub_list, stub_string, stub_true, times, to_path, unique_id, ) py_ = _Dash() _ = py_ __all__ = ( "chunk", "compact", "concat", "difference", "difference_by", "difference_with", "drop", "drop_right", "drop_right_while", "drop_while", "duplicates", "fill", "find_index", "find_last_index", "flatten", "flatten_deep", "flatten_depth", "from_pairs", "head", "index_of", "initial", "intercalate", "interleave", "intersection", "intersection_by", "intersection_with", "intersperse", "last", "last_index_of", "mapcat", "nth", "pop", "pull", "pull_all", "pull_all_by", "pull_all_with", "pull_at", "push", "remove", "reverse", "shift", "slice_", "sort", "sorted_index", "sorted_index_by", "sorted_index_of", "sorted_last_index", "sorted_last_index_by", "sorted_last_index_of", "sorted_uniq", "sorted_uniq_by", "splice", "split_at", "tail", "take", "take_right", "take_right_while", "take_while", "union", "union_by", "union_with", "uniq", "uniq_by", "uniq_with", "unshift", "unzip", "unzip_with", "without", "xor", "xor_by", "xor_with", "zip_", "zip_object", "zip_object_deep", "zip_with", "_Dash", "chain", "tap", "at", "count_by", "every", "filter_", "find", "find_last", "flat_map", "flat_map_deep", "flat_map_depth", "for_each", "for_each_right", "group_by", "includes", "invoke_map", "key_by", "map_", "nest", "order_by", "partition", "pluck", "reduce_", "reduce_right", "reductions", "reductions_right", "reject", "sample", "sample_size", "shuffle", "size", "some", "sort_by", "InvalidMethod", "after", "ary", "before", "conjoin", "curry", "curry_right", "debounce", "delay", "disjoin", "flip", "flow", "flow_right", "iterated", "juxtapose", "negate", "once", "over_args", "partial", "partial_right", "rearg", "spread", "throttle", "unary", "wrap", "add", "ceil", "clamp", "divide", "floor", "max_", "max_by", "mean", "mean_by", "median", "min_", "min_by", "moving_mean", "multiply", "power", "round_", "scale", "slope", "std_deviation", "subtract", "sum_", "sum_by", "transpose", "variance", "zscore", "apply", "apply_catch", "apply_if", "apply_if_not_none", "assign", "assign_with", "callables", "clone", "clone_deep", "clone_deep_with", "clone_with", "defaults", "defaults_deep", "find_key", "find_last_key", "for_in", "for_in_right", "get", "has", "invert", "invert_by", "invoke", "keys", "map_keys", "map_values", "map_values_deep", "merge", "merge_with", "omit", "omit_by", "parse_int", "pick", "pick_by", "rename_keys", "set_", "set_with", "to_boolean", "to_dict", "to_integer", "to_list", "to_number", "to_pairs", "to_string", "transform", "unset", "update", "update_with", "values", "eq", "eq_cmp", "gt", "gt_cmp", "gte", "gte_cmp", "in_range", "in_range_cmp", 
"is_associative", "is_blank", "is_boolean", "is_builtin", "is_date", "is_decreasing", "is_dict", "is_empty", "is_equal", "is_equal_cmp", "is_equal_with", "is_equal_with_cmp", "is_error", "is_even", "is_float", "is_function", "is_increasing", "is_indexed", "is_instance_of", "is_instance_of_cmp", "is_integer", "is_iterable", "is_json", "is_list", "is_match", "is_match_cmp", "is_match_with", "is_match_with_cmp", "is_monotone", "is_monotone_cmp", "is_nan", "is_negative", "is_none", "is_number", "is_object", "is_odd", "is_positive", "is_reg_exp", "is_set", "is_strictly_decreasing", "is_strictly_increasing", "is_string", "is_tuple", "is_zero", "lt", "lt_cmp", "lte", "lte_cmp", "camel_case", "capitalize", "chars", "chop", "chop_right", "clean", "count_substr", "deburr", "decapitalize", "ends_with", "ensure_ends_with", "ensure_starts_with", "escape", "escape_reg_exp", "has_substr", "human_case", "insert_substr", "join", "kebab_case", "lines", "lower_case", "lower_first", "number_format", "pad", "pad_end", "pad_start", "pascal_case", "predecessor", "prune", "quote", "reg_exp_js_match", "reg_exp_js_replace", "reg_exp_replace", "repeat", "replace", "replace_end", "replace_start", "separator_case", "series_phrase", "series_phrase_serial", "slugify", "snake_case", "split", "start_case", "starts_with", "strip_tags", "substr_left", "substr_left_end", "substr_right", "substr_right_end", "successor", "surround", "swap_case", "title_case", "to_lower", "to_upper", "trim", "trim_end", "trim_start", "truncate", "unescape", "unquote", "upper_case", "upper_first", "url", "words", "attempt", "cond", "conforms", "conforms_to", "constant", "default_to", "default_to_any", "identity", "iteratee", "matches", "matches_property", "memoize", "method", "method_of", "noop", "now", "nth_arg", "over", "over_every", "over_some", "properties", "property_", "property_of", "random", "range_", "range_right", "result", "retry", "stub_dict", "stub_false", "stub_list", "stub_string", "stub_true", "times", "to_path", "unique_id", ) pydash-8.0.3/src/pydash/arrays.py000066400000000000000000002133511464745015500167640ustar00rootroot00000000000000""" Functions that operate on lists. .. 
versionadded:: 1.0.0 """ from __future__ import annotations from bisect import bisect_left, bisect_right from functools import cmp_to_key from math import ceil import typing as t import pydash as pyd from .helpers import base_get, iteriteratee, parse_iteratee from .types import IterateeObjT if t.TYPE_CHECKING: from _typeshed import SupportsRichComparisonT # pragma: no cover __all__ = ( "chunk", "compact", "concat", "difference", "difference_by", "difference_with", "drop", "drop_right", "drop_right_while", "drop_while", "duplicates", "fill", "find_index", "find_last_index", "flatten", "flatten_deep", "flatten_depth", "from_pairs", "head", "index_of", "initial", "intercalate", "interleave", "intersection", "intersection_by", "intersection_with", "intersperse", "last", "last_index_of", "mapcat", "nth", "pull", "pull_all", "pull_all_by", "pull_all_with", "pull_at", "push", "remove", "reverse", "shift", "slice_", "sort", "sorted_index", "sorted_index_by", "sorted_index_of", "sorted_last_index", "sorted_last_index_by", "sorted_last_index_of", "sorted_uniq", "sorted_uniq_by", "splice", "split_at", "tail", "take", "take_right", "take_right_while", "take_while", "union", "union_by", "union_with", "uniq", "uniq_by", "uniq_with", "unshift", "unzip", "unzip_with", "without", "xor", "xor_by", "xor_with", "zip_", "zip_object", "zip_object_deep", "zip_with", ) T = t.TypeVar("T") T2 = t.TypeVar("T2") T3 = t.TypeVar("T3") T4 = t.TypeVar("T4") T5 = t.TypeVar("T5") SequenceT = t.TypeVar("SequenceT", bound=t.Sequence[t.Any]) MutableSequenceT = t.TypeVar("MutableSequenceT", bound=t.MutableSequence[t.Any]) def chunk(array: t.Sequence[T], size: int = 1) -> t.List[t.Sequence[T]]: """ Creates a list of elements split into groups the length of `size`. If `array` can't be split evenly, the final chunk will be the remaining elements. Args: array: List to chunk. size: Chunk size. Defaults to ``1``. Returns: New list containing chunks of `array`. Example: >>> chunk([1, 2, 3, 4, 5], 2) [[1, 2], [3, 4], [5]] .. versionadded:: 1.1.0 """ chunks = int(ceil(len(array) / float(size))) return [array[i * size : (i + 1) * size] for i in range(chunks)] def compact(array: t.Iterable[t.Union[T, None]]) -> t.List[T]: """ Creates a list with all falsey values of array removed. Args: array: List to compact. Returns: Compacted list. Example: >>> compact(["", 1, 0, True, False, None]) [1, True] .. versionadded:: 1.0.0 """ return [item for item in array if item] def concat(*arrays: t.Iterable[T]) -> t.List[T]: """ Concatenates zero or more lists into one. Args: arrays: Lists to concatenate. Returns: Concatenated list. Example: >>> concat([1, 2], [3, 4], [[5], [6]]) [1, 2, 3, 4, [5], [6]] .. versionadded:: 2.0.0 .. versionchanged:: 4.0.0 Renamed from ``cat`` to ``concat``. """ return flatten(arrays) def difference(array: t.Iterable[T], *others: t.Iterable[T]) -> t.List[T]: """ Creates a list of list elements not present in others. Args: array: List to process. others: Lists to check. Returns: Difference between `others`. Example: >>> difference([1, 2, 3], [1], [2]) [3] .. versionadded:: 1.0.0 """ return difference_with(array, *others) @t.overload def difference_by( array: t.Iterable[T], *others: t.Iterable[T], iteratee: t.Union[IterateeObjT, t.Callable[[T], t.Any], None], ) -> t.List[T]: ... @t.overload def difference_by( array: t.Iterable[T], *others: t.Union[IterateeObjT, t.Iterable[T], t.Callable[[T], t.Any]] ) -> t.List[T]: ... 
def difference_by(array, *others, **kwargs): """ This method is like :func:`difference` except that it accepts an iteratee which is invoked for each element of each array to generate the criterion by which they're compared. The order and references of result values are determined by `array`. The iteratee is invoked with one argument: ``(value)``. Args: array: The array to find the difference of. others: Lists to check for difference with `array`. Keyword Args: iteratee: Function to transform the elements of the arrays. Defaults to :func:`.identity`. Returns: Difference between `others`. Example: >>> difference_by([1.2, 1.5, 1.7, 2.8], [0.9, 3.2], round) [1.5, 1.7] .. versionadded:: 4.0.0 """ array = array[:] if not others: return array # Check if last other is a potential iteratee. iteratee, others = parse_iteratee("iteratee", *others, **kwargs) for other in others: if not other: continue array = list(iterdifference(array, other, iteratee=iteratee)) return array @t.overload def difference_with( array: t.Iterable[T], *others: t.Iterable[T2], comparator: t.Union[t.Callable[[T, T2], t.Any], None], ) -> t.List[T]: ... @t.overload def difference_with( array: t.Iterable[T], *others: t.Union[t.Iterable[T2], t.Callable[[T, T2], t.Any]] ) -> t.List[T]: ... def difference_with(array, *others, **kwargs): """ This method is like :func:`difference` except that it accepts a comparator which is invoked to compare the elements of all arrays. The order and references of result values are determined by the first array. The comparator is invoked with two arguments: ``(arr_val, oth_val)``. Args: array: The array to find the difference of. others: Lists to check for difference with `array`. Keyword Args: comparator: Function to compare the elements of the arrays. Defaults to :func:`.is_equal`. Returns: Difference between `others`. Example: >>> array = ["apple", "banana", "pear"] >>> others = (["avocado", "pumpkin"], ["peach"]) >>> comparator = lambda a, b: a[0] == b[0] >>> difference_with(array, *others, comparator=comparator) ['banana'] .. versionadded:: 4.0.0 """ array = array[:] if not others: return array comparator = kwargs.get("comparator") last_other = others[-1] # Check if last other is a comparator. if comparator is None and (callable(last_other) or last_other is None): comparator = last_other others = others[:-1] for other in others: if not other: continue array = list(iterdifference(array, other, comparator=comparator)) return array def drop(array: t.Sequence[T], n: int = 1) -> t.List[T]: """ Creates a slice of `array` with `n` elements dropped from the beginning. Args: array: List to process. n: Number of elements to drop. Defaults to ``1``. Returns: Dropped list. Example: >>> drop([1, 2, 3, 4], 2) [3, 4] .. versionadded:: 1.0.0 .. versionchanged:: 1.1.0 Added ``n`` argument and removed as alias of :func:`rest`. .. versionchanged:: 3.0.0 Made ``n`` default to ``1``. """ return drop_while(array, lambda _, index: index < n) def drop_right(array: t.Sequence[T], n: int = 1) -> t.List[T]: """ Creates a slice of `array` with `n` elements dropped from the end. Args: array: List to process. n: Number of elements to drop. Defaults to ``1``. Returns: Dropped list. Example: >>> drop_right([1, 2, 3, 4], 2) [1, 2] .. versionadded:: 1.1.0 .. versionchanged:: 3.0.0 Made ``n`` default to ``1``. """ length = len(array) return drop_right_while(array, lambda _, index: (length - index) <= n) @t.overload def drop_right_while( array: t.Sequence[T], predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> t.List[T]: ... 
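# Editor's note: ``drop`` and ``drop_right`` above are thin wrappers around the
# ``*_while`` variants using an index-based predicate. A hedged, doctest-style
# sketch of the same pattern applied directly (assumes pydash is importable as
# ``pyd``):
#
# >>> import pydash as pyd
# >>> pyd.drop_while([1, 2, 3, 4], lambda _, index: index < 2)
# [3, 4]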
@t.overload def drop_right_while(array: t.Sequence[T], predicate: t.Callable[[T, int], t.Any]) -> t.List[T]: ... @t.overload def drop_right_while(array: t.Sequence[T], predicate: t.Callable[[T], t.Any]) -> t.List[T]: ... @t.overload def drop_right_while(array: t.Sequence[T], predicate: None = None) -> t.List[T]: ... def drop_right_while(array, predicate=None): """ Creates a slice of `array` excluding elements dropped from the end. Elements are dropped until the `predicate` returns falsey. The `predicate` is invoked with three arguments: ``(value, index, array)``. Args: array: List to process. predicate: Predicate called per iteration Returns: Dropped list. Example: >>> drop_right_while([1, 2, 3, 4], lambda x: x >= 3) [1, 2] .. versionadded:: 1.1.0 """ n = len(array) for is_true, _, _, _ in iteriteratee(array, predicate, reverse=True): if is_true: n -= 1 else: break return array[:n] @t.overload def drop_while( array: t.Sequence[T], predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> t.List[T]: ... @t.overload def drop_while(array: t.Sequence[T], predicate: t.Callable[[T, int], t.Any]) -> t.List[T]: ... @t.overload def drop_while(array: t.Sequence[T], predicate: t.Callable[[T], t.Any]) -> t.List[T]: ... @t.overload def drop_while(array: t.Sequence[T], predicate: None = None) -> t.List[T]: ... def drop_while(array, predicate=None): """ Creates a slice of `array` excluding elements dropped from the beginning. Elements are dropped until the `predicate` returns falsey. The `predicate` is invoked with three arguments: ``(value, index, array)``. Args: array: List to process. predicate: Predicate called per iteration Returns: Dropped list. Example: >>> drop_while([1, 2, 3, 4], lambda x: x < 3) [3, 4] .. versionadded:: 1.1.0 """ n = 0 for is_true, _, _, _ in iteriteratee(array, predicate): if is_true: n += 1 else: break return array[n:] def duplicates( array: t.Sequence[T], iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None ) -> t.List[T]: """ Creates a unique list of duplicate values from `array`. If iteratee is passed, each element of array is passed through an iteratee before duplicates are computed. The iteratee is invoked with three arguments: ``(value, index, array)``. If an object path is passed for iteratee, the created iteratee will return the path value of the given element. If an object is passed for iteratee, the created filter style iteratee will return ``True`` for elements that have the properties of the given object, else ``False``. Args: array: List to process. iteratee: Iteratee applied per iteration. Returns: List of duplicates. Example: >>> duplicates([0, 1, 3, 2, 3, 1]) [3, 1] .. versionadded:: 3.0.0 """ if iteratee: cbk = pyd.iteratee(iteratee) computed = [cbk(item) for item in array] else: computed = array # type: ignore # NOTE: Using array[i] instead of item since iteratee could have modified # returned item values. lst = uniq(array[i] for i, _ in iterduplicates(computed)) return lst def fill( array: t.Sequence[T], value: T2, start: int = 0, end: t.Union[int, None] = None ) -> t.List[t.Union[T, T2]]: """ Fills elements of array with value from `start` up to, but not including, `end`. Args: array: List to fill. value: Value to fill with. start: Index to start filling. Defaults to ``0``. end: Index to end filling. Defaults to ``len(array)``. Returns: Filled `array`. 
Example: >>> fill([1, 2, 3, 4, 5], 0) [0, 0, 0, 0, 0] >>> fill([1, 2, 3, 4, 5], 0, 1, 3) [1, 0, 0, 4, 5] >>> fill([1, 2, 3, 4, 5], 0, 0, 100) [0, 0, 0, 0, 0] Warning: `array` is modified in place. .. versionadded:: 3.1.0 """ if end is None: end = len(array) else: end = min(end, len(array)) # Use this style of assignment so that `array` is mutated. array[:] = array[:start] + [value] * len(array[start:end]) + array[end:] # type: ignore return array # type: ignore @t.overload def find_index(array: t.Iterable[T], predicate: t.Callable[[T, int, t.List[T]], t.Any]) -> int: ... @t.overload def find_index(array: t.Iterable[T], predicate: t.Callable[[T, int], t.Any]) -> int: ... @t.overload def find_index(array: t.Iterable[T], predicate: t.Callable[[T], t.Any]) -> int: ... @t.overload def find_index(array: t.Iterable[t.Any], predicate: IterateeObjT) -> int: ... @t.overload def find_index(array: t.Iterable[t.Any], predicate: None = None) -> int: ... def find_index(array, predicate=None): """ This method is similar to :func:`pydash.collections.find`, except that it returns the index of the element that passes the predicate check, instead of the element itself. Args: array: List to process. predicate: Predicate applied per iteration. Returns: Index of found item or ``-1`` if not found. Example: >>> find_index([1, 2, 3, 4], lambda x: x >= 3) 2 >>> find_index([1, 2, 3, 4], lambda x: x > 4) -1 .. versionadded:: 1.0.0 """ search = (i for is_true, _, i, _ in iteriteratee(array, predicate) if is_true) return next(search, -1) @t.overload def find_last_index( array: t.Iterable[T], predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> int: ... @t.overload def find_last_index(array: t.Iterable[T], predicate: t.Callable[[T, int], t.Any]) -> int: ... @t.overload def find_last_index(array: t.Iterable[T], predicate: t.Callable[[T], t.Any]) -> int: ... @t.overload def find_last_index(array: t.Iterable[t.Any], predicate: IterateeObjT) -> int: ... @t.overload def find_last_index(array: t.Iterable[t.Any], predicate: None = None) -> int: ... def find_last_index(array, predicate=None): """ This method is similar to :func:`find_index`, except that it iterates over elements from right to left. Args: array: List to process. predicate: Predicate applied per iteration. Returns: Index of found item or ``-1`` if not found. Example: >>> find_last_index([1, 2, 3, 4], lambda x: x >= 3) 3 >>> find_last_index([1, 2, 3, 4], lambda x: x > 4) -1 .. versionadded:: 1.0.0 """ search = (i for is_true, _, i, _ in iteriteratee(array, predicate, reverse=True) if is_true) return next(search, -1) @t.overload def flatten(array: t.Iterable[t.Iterable[T]]) -> t.List[T]: ... @t.overload def flatten(array: t.Iterable[T]) -> t.List[T]: ... def flatten(array): """ Flattens array a single level deep. Args: array: List to flatten. Returns: Flattened list. Example: >>> flatten([[1], [2, [3]], [[4]]]) [1, 2, [3], [4]] .. versionadded:: 1.0.0 .. versionchanged:: 2.0.0 Removed `callback` option. Added ``is_deep`` option. Made it shallow by default. .. versionchanged:: 4.0.0 Removed ``is_deep`` option. Use :func:`flatten_deep` instead. """ return flatten_depth(array, depth=1) def flatten_deep(array: t.Iterable[t.Any]) -> t.List[t.Any]: """ Flattens an array recursively. Args: array: List to flatten. Returns: Flattened list. Example: >>> flatten_deep([[1], [2, [3]], [[4]]]) [1, 2, 3, 4] .. 
versionadded:: 2.0.0 """ return flatten_depth(array, depth=-1) def flatten_depth(array: t.Iterable[t.Any], depth: int = 1) -> t.List[t.Any]: """ Recursively flatten `array` up to `depth` times. Args: array: List to flatten. depth: Depth to flatten to. Defaults to ``1``. Returns: Flattened list. Example: >>> flatten_depth([[[1], [2, [3]], [[4]]]], 1) [[1], [2, [3]], [[4]]] >>> flatten_depth([[[1], [2, [3]], [[4]]]], 2) [1, 2, [3], [4]] >>> flatten_depth([[[1], [2, [3]], [[4]]]], 3) [1, 2, 3, 4] >>> flatten_depth([[[1], [2, [3]], [[4]]]], 4) [1, 2, 3, 4] .. versionadded:: 4.0.0 """ return list(iterflatten(array, depth=depth)) @t.overload def from_pairs(pairs: t.Iterable[t.Tuple[T, T2]]) -> t.Dict[T, T2]: ... @t.overload def from_pairs( pairs: t.Iterable[t.List[t.Union[T, T2]]], ) -> t.Dict[t.Union[T, T2], t.Union[T, T2]]: ... def from_pairs(pairs): """ Returns a dict from the given list of pairs. Args: pairs: List of key-value pairs. Returns: dict Example: >>> from_pairs([["a", 1], ["b", 2]]) == {"a": 1, "b": 2} True .. versionadded:: 4.0.0 """ return dict(pairs) def head(array: t.Sequence[T]) -> t.Union[T, None]: """ Return the first element of `array`. Args: array: List to process. Returns: First element of list. Example: >>> head([1, 2, 3, 4]) 1 .. versionadded:: 1.0.0 .. versionchanged:: Renamed from ``first`` to ``head``. """ return base_get(array, 0, default=None) def index_of(array: t.Sequence[T], value: T, from_index: int = 0) -> int: """ Gets the index at which the first occurrence of value is found. Args: array: List to search. value: Value to search for. from_index: Index to search from. Returns: Index of found item or ``-1`` if not found. Example: >>> index_of([1, 2, 3, 4], 2) 1 >>> index_of([2, 1, 2, 3], 2, from_index=1) 2 .. versionadded:: 1.0.0 """ try: return array.index(value, from_index) except ValueError: return -1 def initial(array: t.Sequence[T]) -> t.Sequence[T]: """ Return all but the last element of `array`. Args: array: List to process. Returns: Initial part of `array`. Example: >>> initial([1, 2, 3, 4]) [1, 2, 3] .. versionadded:: 1.0.0 """ return array[:-1] @t.overload def intercalate(array: t.Iterable[t.Iterable[T]], separator: T2) -> t.List[t.Union[T, T2]]: ... @t.overload def intercalate(array: t.Iterable[T], separator: T2) -> t.List[t.Union[T, T2]]: ... def intercalate(array, separator): """ Like :func:`intersperse` for lists of lists but shallowly flattening the result. Args: array: List to intercalate. separator: Element to insert. Returns: Intercalated list. Example: >>> intercalate([1, [2], [3], 4], "x") [1, 'x', 2, 'x', 3, 'x', 4] .. versionadded:: 2.0.0 """ return flatten(intersperse(array, separator)) def interleave(*arrays: t.Iterable[T]) -> t.List[T]: """ Merge multiple lists into a single list by inserting the next element of each list by sequential round-robin into the new list. Args: arrays: Lists to interleave. Returns: Interleaved list. Example: >>> interleave([1, 2, 3], [4, 5, 6], [7, 8, 9]) [1, 4, 7, 2, 5, 8, 3, 6, 9] .. versionadded:: 2.0.0 """ return list(iterinterleave(*arrays)) def intersection(array: t.Sequence[T], *others: t.Iterable[t.Any]) -> t.List[T]: """ Computes the intersection of all the passed-in arrays. Args: array: The array to find the intersection of. others: Lists to check for intersection with `array`. Returns: Intersection of provided lists. Example: >>> intersection([1, 2, 3], [1, 2, 3, 4, 5], [2, 3]) [2, 3] >>> intersection([1, 2, 3]) [1, 2, 3] .. versionadded:: 1.0.0 .. 
versionchanged:: 4.0.0 Support finding intersection of unhashable types. """ return intersection_with(array, *others) @t.overload def intersection_by( array: t.Sequence[T], *others: t.Iterable[t.Any], iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT], ) -> t.List[T]: ... @t.overload def intersection_by( array: t.Sequence[T], *others: t.Union[t.Iterable[t.Any], t.Callable[[T], t.Any], IterateeObjT] ) -> t.List[T]: ... def intersection_by(array, *others, **kwargs): """ This method is like :func:`intersection` except that it accepts an iteratee which is invoked for each element of each array to generate the criterion by which they're compared. The order and references of result values are determined by `array`. The iteratee is invoked with one argument: ``(value)``. Args: array: The array to find the intersection of. others: Lists to check for intersection with `array`. Keyword Args: iteratee: Function to transform the elements of the arrays. Defaults to :func:`.identity`. Returns: Intersection of provided lists. Example: >>> intersection_by([1.2, 1.5, 1.7, 2.8], [0.9, 3.2], round) [1.2, 2.8] .. versionadded:: 4.0.0 """ array = array[:] if not others: return array iteratee, others = parse_iteratee("iteratee", *others, **kwargs) # Sort by smallest list length to make intersection faster. others = sorted(others, key=lambda other: len(other)) for other in others: array = list(iterintersection(array, other, iteratee=iteratee)) if not array: break return array @t.overload def intersection_with( array: t.Sequence[T], *others: t.Iterable[T2], comparator: t.Callable[[T, T2], t.Any] ) -> t.List[T]: ... @t.overload def intersection_with( array: t.Sequence[T], *others: t.Union[t.Iterable[T2], t.Callable[[T, T2], t.Any]] ) -> t.List[T]: ... def intersection_with(array, *others, **kwargs): """ This method is like :func:`intersection` except that it accepts a comparator which is invoked to compare the elements of all arrays. The order and references of result values are determined by the first array. The comparator is invoked with two arguments: ``(arr_val, oth_val)``. Args: array: The array to find the intersection of. others: Lists to check for intersection with `array`. Keyword Args: comparator: Function to compare the elements of the arrays. Defaults to :func:`.is_equal`. Returns: Intersection of provided lists. Example: >>> array = ["apple", "banana", "pear"] >>> others = (["avocado", "pumpkin"], ["peach"]) >>> comparator = lambda a, b: a[0] == b[0] >>> intersection_with(array, *others, comparator=comparator) ['pear'] .. versionadded:: 4.0.0 """ array = array[:] if not others: return array comparator, others = parse_iteratee("comparator", *others, **kwargs) # Sort by smallest list length to reduce to intersection faster. others = sorted(others, key=lambda other: len(other)) for other in others: array = list(iterintersection(array, other, comparator=comparator)) if not array: break return array def intersperse(array: t.Iterable[T], separator: T2) -> t.List[t.Union[T, T2]]: """ Insert a separating element between the elements of `array`. Args: array: List to intersperse. separator: Element to insert. Returns: Interspersed list. Example: >>> intersperse([1, [2], [3], 4], "x") [1, 'x', [2], 'x', [3], 'x', 4] .. versionadded:: 2.0.0 """ return list(iterintersperse(array, separator)) def last(array: t.Sequence[T]) -> t.Union[T, None]: """ Return the last element of `array`. Args: array: List to process. Returns: Last part of `array`. Example: >>> last([1, 2, 3, 4]) 4 .. 
versionadded:: 1.0.0 """ return base_get(array, -1, default=None) def last_index_of( array: t.Sequence[t.Any], value: t.Any, from_index: t.Union[int, None] = None ) -> int: """ Gets the index at which the last occurrence of value is found. Args: array: List to search. value: Value to search for. from_index: Index to search from. Returns: Index of found item or ``-1`` if not found. Example: >>> last_index_of([1, 2, 2, 4], 2) 2 >>> last_index_of([1, 2, 2, 4], 2, from_index=1) 1 .. versionadded:: 1.0.0 """ index = array_len = len(array) try: # safe as we are catching any type errors from_index = int(from_index) # type: ignore except (TypeError, ValueError): pass else: # Set starting index base on from_index offset. index = max(0, index + from_index) if from_index < 0 else min(from_index, index - 1) while index: if index < array_len and array[index] == value: return index index -= 1 return -1 @t.overload def mapcat( array: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], t.Union[t.List[T2], t.List[t.List[T2]]]], ) -> t.List[T2]: ... @t.overload def mapcat(array: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], T2]) -> t.List[T2]: ... @t.overload def mapcat( array: t.Iterable[T], iteratee: t.Callable[[T, int], t.Union[t.List[T2], t.List[t.List[T2]]]] ) -> t.List[T2]: ... @t.overload def mapcat(array: t.Iterable[T], iteratee: t.Callable[[T, int], T2]) -> t.List[T2]: ... @t.overload def mapcat( array: t.Iterable[T], iteratee: t.Callable[[T], t.Union[t.List[T2], t.List[t.List[T2]]]] ) -> t.List[T2]: ... @t.overload def mapcat(array: t.Iterable[T], iteratee: t.Callable[[T], T2]) -> t.List[T2]: ... @t.overload def mapcat( array: t.Iterable[t.Union[t.List[T], t.List[t.List[T]]]], iteratee: None = None ) -> t.List[t.Union[T, t.List[T]]]: ... def mapcat(array, iteratee=None): """ Map an iteratee to each element of a list and concatenate the results into a single list using :func:`concat`. Args: array: List to map and concatenate. iteratee: Iteratee to apply to each element. Returns: Mapped and concatenated list. Example: >>> mapcat(range(4), lambda x: list(range(x))) [0, 0, 1, 0, 1, 2] .. versionadded:: 2.0.0 """ return concat(*pyd.map_(array, iteratee)) def nth(array: t.Iterable[T], pos: int = 0) -> t.Union[T, None]: """ Gets the element at index n of array. Args: array: List passed in by the user. pos: Index of element to return. Returns: Returns the element at :attr:`pos`. Example: >>> nth([1, 2, 3], 0) 1 >>> nth([3, 4, 5, 6], 2) 5 >>> nth([11, 22, 33], -1) 33 >>> nth([11, 22, 33]) 11 .. versionadded:: 4.0.0 """ return pyd.get(array, pos) def pop(array: t.List[T], index: int = -1) -> T: """ Remove element of array at `index` and return element. Args: array: List to pop from. index: Index to remove element from. Defaults to ``-1``. Returns: Value at `index`. Warning: `array` is modified in place. Example: >>> array = [1, 2, 3, 4] >>> item = pop(array) >>> item 4 >>> array [1, 2, 3] >>> item = pop(array, index=0) >>> item 1 >>> array [2, 3] .. versionadded:: 2.2.0 """ return array.pop(index) def pull(array: t.List[T], *values: T) -> t.List[T]: """ Removes all provided values from the given array. Args: array: List to pull from. values: Values to remove. Returns: Modified `array`. Warning: `array` is modified in place. Example: >>> pull([1, 2, 2, 3, 3, 4], 2, 3) [1, 4] .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 :func:`pull` method now calls :func:`pull_all` method for the desired functionality. 
""" return pull_all(array, values) def pull_all(array: t.List[T], values: t.Iterable[T]) -> t.List[T]: """ Removes all provided values from the given array. Args: array: Array to modify. values: Values to remove. Returns: Modified `array`. Example: >>> pull_all([1, 2, 2, 3, 3, 4], [2, 3]) [1, 4] .. versionadded:: 4.0.0 """ # Use this style of assignment so that `array` is mutated. array[:] = without(array, *values) return array def pull_all_by( array: t.List[T], values: t.Iterable[T], iteratee: t.Union[IterateeObjT, t.Callable[[T], t.Any], None] = None, ) -> t.List[T]: """ This method is like :func:`pull_all` except that it accepts iteratee which is invoked for each element of array and values to generate the criterion by which they're compared. The iteratee is invoked with one argument: ``(value)``. Args: array: Array to modify. values: Values to remove. iteratee: Function to transform the elements of the arrays. Defaults to :func:`.identity`. Returns: Modified `array`. Example: >>> array = [{"x": 1}, {"x": 2}, {"x": 3}, {"x": 1}] >>> pull_all_by(array, [{"x": 1}, {"x": 3}], "x") [{'x': 2}] .. versionadded:: 4.0.0 """ values = difference(array, difference_by(array, values, iteratee=iteratee)) return pull_all(array, values) def pull_all_with( array: t.List[T], values: t.Iterable[T], comparator: t.Union[t.Callable[[T, T], t.Any], None] = None, ) -> t.List[T]: """ This method is like :func:`pull_all` except that it accepts comparator which is invoked to compare elements of array to values. The comparator is invoked with two arguments: ``(arr_val, oth_val)``. Args: array: Array to modify. values: Values to remove. comparator: Function to compare the elements of the arrays. Defaults to :func:`.is_equal`. Returns: Modified `array`. Example: >>> array = [{"x": 1, "y": 2}, {"x": 3, "y": 4}, {"x": 5, "y": 6}] >>> res = pull_all_with(array, [{"x": 3, "y": 4}], lambda a, b: a == b) >>> res == [{"x": 1, "y": 2}, {"x": 5, "y": 6}] True >>> array = [{"x": 1, "y": 2}, {"x": 3, "y": 4}, {"x": 5, "y": 6}] >>> res = pull_all_with(array, [{"x": 3, "y": 4}], lambda a, b: a != b) >>> res == [{"x": 3, "y": 4}] True .. versionadded:: 4.0.0 """ values = difference(array, difference_with(array, values, comparator=comparator)) return pull_all(array, values) def pull_at(array: t.List[T], *indexes: int) -> t.List[T]: """ Removes elements from `array` corresponding to the specified indexes and returns a list of the removed elements. Indexes may be specified as a list of indexes or as individual arguments. Args: array: List to pull from. indexes: Indexes to pull. Returns: Modified `array`. Warning: `array` is modified in place. Example: >>> pull_at([1, 2, 3, 4], 0, 2) [2, 4] .. versionadded:: 1.1.0 """ flat_indexes = flatten(indexes) for index in sorted(flat_indexes, reverse=True): del array[index] return array def push(array: t.List[T], *items: T2) -> t.List[t.Union[T, T2]]: """ Push items onto the end of `array` and return modified `array`. Args: array: List to push to. items: Items to append. Returns: Modified `array`. Warning: `array` is modified in place. Example: >>> array = [1, 2, 3] >>> push(array, 4, 5, [6]) [1, 2, 3, 4, 5, [6]] .. versionadded:: 2.2.0 .. versionchanged:: 4.0.0 Removed alias ``append``. 
""" for item in items: array.append(item) # type: ignore return array # type: ignore def remove( array: t.List[T], predicate: t.Union[ t.Callable[[T, int, t.List[T]], t.Any], t.Callable[[T, int], t.Any], t.Callable[[T], t.Any], None, ] = None, ) -> t.List[T]: """ Removes all elements from a list that the predicate returns truthy for and returns an array of removed elements. Args: array: List to remove elements from. predicate: Predicate applied per iteration. Returns: Removed elements of `array`. Warning: `array` is modified in place. Example: >>> array = [1, 2, 3, 4] >>> items = remove(array, lambda x: x >= 3) >>> items [3, 4] >>> array [1, 2] .. versionadded:: 1.0.0 """ removed = [] kept = [] for is_true, _, i, _ in iteriteratee(array, predicate): if is_true: removed.append(array[i]) else: kept.append(array[i]) # Modify array in place. array[:] = kept return removed def reverse(array: SequenceT) -> SequenceT: """ Return `array` in reverse order. Args: array: Object to process. Returns: Reverse of object. Example: >>> reverse([1, 2, 3, 4]) [4, 3, 2, 1] .. versionadded:: 2.2.0 """ # NOTE: Using this method to reverse object since it works for both lists and strings. return array[::-1] # type: ignore def shift(array: t.List[T]) -> T: """ Remove the first element of `array` and return it. Args: array: List to shift. Returns: First element of `array`. Warning: `array` is modified in place. Example: >>> array = [1, 2, 3, 4] >>> item = shift(array) >>> item 1 >>> array [2, 3, 4] .. versionadded:: 2.2.0 """ return pop(array, 0) def slice_(array: SequenceT, start: int = 0, end: t.Union[int, None] = None) -> SequenceT: """ Slices `array` from the `start` index up to, but not including, the `end` index. Args: array: Array to slice. start: Start index. Defaults to ``0``. end: End index. Defaults to selecting the value at ``start`` index. Returns: Sliced list. Example: >>> slice_([1, 2, 3, 4]) [1] >>> slice_([1, 2, 3, 4], 1) [2] >>> slice_([1, 2, 3, 4], 1, 3) [2, 3] .. versionadded:: 1.1.0 """ if end is None: end = (start + 1) if start >= 0 else (len(array) + start + 1) return array[start:end] # type: ignore @t.overload def sort( array: t.List["SupportsRichComparisonT"], comparator: None = None, key: None = None, reverse: bool = False, ) -> t.List["SupportsRichComparisonT"]: ... @t.overload def sort( array: t.List[T], comparator: t.Callable[[T, T], int], *, reverse: bool = False ) -> t.List[T]: ... @t.overload def sort( array: t.List[T], *, key: t.Callable[[T], "SupportsRichComparisonT"], reverse: bool = False ) -> t.List[T]: ... def sort(array, comparator=None, key=None, reverse=False): """ Sort `array` using optional `comparator`, `key`, and `reverse` options and return sorted `array`. Note: Python 3 removed the option to pass a custom comparator function and instead only allows a key function. Therefore, if a comparator function is passed in, it will be converted to a key function automatically using ``functools.cmp_to_key``. Args: array: List to sort. comparator: A custom comparator function used to sort the list. Function should accept two arguments and return a negative, zero, or position number depending on whether the first argument is considered smaller than, equal to, or larger than the second argument. Defaults to ``None``. This argument is mutually exclusive with `key`. key: A function of one argument used to extract a comparator key from each list element. Defaults to ``None``. This argument is mutually exclusive with `comparator`. reverse: Whether to reverse the sort. 
Defaults to ``False``. Returns: Sorted list. Warning: `array` is modified in place. Example: >>> sort([2, 1, 4, 3]) [1, 2, 3, 4] >>> sort([2, 1, 4, 3], reverse=True) [4, 3, 2, 1] >>> results = sort([{'a': 2, 'b': 1},\ {'a': 3, 'b': 2},\ {'a': 0, 'b': 3}],\ key=lambda item: item['a']) >>> assert results == [{'a': 0, 'b': 3},\ {'a': 2, 'b': 1},\ {'a': 3, 'b': 2}] .. versionadded:: 2.2.0 """ if comparator and key: raise ValueError('The "comparator" and "key" arguments are mutually exclusive') if comparator: key = cmp_to_key(comparator) array.sort(key=key, reverse=reverse) return array def sorted_index( array: t.Sequence["SupportsRichComparisonT"], value: "SupportsRichComparisonT" ) -> int: """ Uses a binary search to determine the lowest index at which `value` should be inserted into `array` in order to maintain its sort order. Args: array: List to inspect. value: Value to evaluate. Returns: Returns the index at which `value` should be inserted into `array`. Example: >>> sorted_index([1, 2, 2, 3, 4], 2) 1 .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Move iteratee support to :func:`sorted_index_by`. """ return sorted_index_by(array, value) @t.overload def sorted_index_by( array: t.Sequence[T], value: T, iteratee: t.Union[IterateeObjT, t.Callable[[T], "SupportsRichComparisonT"]], ) -> int: ... @t.overload def sorted_index_by( array: t.Sequence["SupportsRichComparisonT"], value: "SupportsRichComparisonT", iteratee: None = None, ) -> int: ... def sorted_index_by(array, value, iteratee=None): """ This method is like :func:`sorted_index` except that it accepts iteratee which is invoked for `value` and each element of `array` to compute their sort ranking. The iteratee is invoked with one argument: ``(value)``. Args: array: List to inspect. value: Value to evaluate. iteratee: The iteratee invoked per element. Defaults to :func:`.identity`. Returns: Returns the index at which `value` should be inserted into `array`. Example: >>> array = [{"x": 4}, {"x": 5}] >>> sorted_index_by(array, {"x": 4}, lambda o: o["x"]) 0 >>> sorted_index_by(array, {"x": 4}, "x") 0 .. versionadded:: 4.0.0 """ if iteratee: # Generate array of sorted keys computed using iteratee. iteratee = pyd.iteratee(iteratee) array = sorted(iteratee(item) for item in array) value = iteratee(value) return bisect_left(array, value) def sorted_index_of( array: t.Sequence["SupportsRichComparisonT"], value: "SupportsRichComparisonT" ) -> int: """ Returns the index of the matched `value` from the sorted `array`, else ``-1``. Args: array: Array to inspect. value: Value to search for. Returns: Returns the index of the first matched value, else ``-1``. Example: >>> sorted_index_of([3, 5, 7, 10], 3) 0 >>> sorted_index_of([10, 10, 5, 7, 3], 10) -1 .. versionadded:: 4.0.0 """ index = sorted_index(array, value) if index < len(array) and array[index] == value: return index else: return -1 def sorted_last_index( array: t.Sequence["SupportsRichComparisonT"], value: "SupportsRichComparisonT" ) -> int: """ This method is like :func:`sorted_index` except that it returns the highest index at which `value` should be inserted into `array` in order to maintain its sort order. Args: array: List to inspect. value: Value to evaluate. Returns: Returns the index at which `value` should be inserted into `array`. Example: >>> sorted_last_index([1, 2, 2, 3, 4], 2) 3 .. versionadded:: 1.1.0 .. versionchanged:: 4.0.0 Move iteratee support to :func:`sorted_last_index_by`. 
""" return sorted_last_index_by(array, value) @t.overload def sorted_last_index_by( array: t.Sequence[T], value: T, iteratee: t.Union[IterateeObjT, t.Callable[[T], "SupportsRichComparisonT"]], ) -> int: ... @t.overload def sorted_last_index_by( array: t.Sequence["SupportsRichComparisonT"], value: "SupportsRichComparisonT", iteratee: None = None, ) -> int: ... def sorted_last_index_by(array, value, iteratee=None): """ This method is like :func:`sorted_last_index` except that it accepts iteratee which is invoked for `value` and each element of `array` to compute their sort ranking. The iteratee is invoked with one argument: ``(value)``. Args: array: List to inspect. value: Value to evaluate. iteratee: The iteratee invoked per element. Defaults to :func:`.identity`. Returns: Returns the index at which `value` should be inserted into `array`. Example: >>> array = [{"x": 4}, {"x": 5}] >>> sorted_last_index_by(array, {"x": 4}, lambda o: o["x"]) 1 >>> sorted_last_index_by(array, {"x": 4}, "x") 1 """ if iteratee: # Generate array of sorted keys computed using iteratee. iteratee = pyd.iteratee(iteratee) array = sorted(iteratee(item) for item in array) value = iteratee(value) return bisect_right(array, value) def sorted_last_index_of( array: t.Sequence["SupportsRichComparisonT"], value: "SupportsRichComparisonT" ) -> int: """ This method is like :func:`last_index_of` except that it performs a binary search on a sorted `array`. Args: array: Array to inspect. value: Value to search for. Returns: Returns the index of the matched value, else ``-1``. Example: >>> sorted_last_index_of([4, 5, 5, 5, 6], 5) 3 >>> sorted_last_index_of([6, 5, 5, 5, 4], 6) -1 .. versionadded:: 4.0.0 """ index = sorted_last_index(array, value) - 1 if index < len(array) and array[index] == value: return index else: return -1 def sorted_uniq(array: t.Iterable["SupportsRichComparisonT"]) -> t.List["SupportsRichComparisonT"]: """ Return sorted array with unique elements. Args: array: List of values to be sorted. Returns: List of unique elements in a sorted fashion. Example: >>> sorted_uniq([4, 2, 2, 5]) [2, 4, 5] >>> sorted_uniq([-2, -2, 4, 1]) [-2, 1, 4] .. versionadded:: 4.0.0 """ return sorted(uniq(array)) def sorted_uniq_by( array: t.Iterable["SupportsRichComparisonT"], iteratee: t.Union[ t.Callable[["SupportsRichComparisonT"], "SupportsRichComparisonT"], None ] = None, ) -> t.List["SupportsRichComparisonT"]: """ This method is like :func:`sorted_uniq` except that it accepts iteratee which is invoked for each element in array to generate the criterion by which uniqueness is computed. The order of result values is determined by the order they occur in the array. The iteratee is invoked with one argument: ``(value)``. Args: array: List of values to be sorted. iteratee: Function to transform the elements of the arrays. Defaults to :func:`.identity`. Returns: Unique list. Example: >>> sorted_uniq_by([3, 2, 1, 3, 2, 1], lambda val: val % 2) [2, 3] .. versionadded:: 4.0.0 """ return sorted(uniq_by(array, iteratee=iteratee)) def splice( array: MutableSequenceT, start: int, count: t.Union[int, None] = None, *items: t.Any ) -> MutableSequenceT: """ Modify the contents of `array` by inserting elements starting at index `start` and removing `count` number of elements after. Args: array: List to splice. start: Start to splice at. count: Number of items to remove starting at `start`. If ``None`` then all items after `start` are removed. Defaults to ``None``. items: Elements to insert starting at `start`. 
Each item is inserted in the order given. Returns: The removed elements of `array` or the spliced string. Warning: `array` is modified in place if ``list``. Example: >>> array = [1, 2, 3, 4] >>> splice(array, 1) [2, 3, 4] >>> array [1] >>> array = [1, 2, 3, 4] >>> splice(array, 1, 2) [2, 3] >>> array [1, 4] >>> array = [1, 2, 3, 4] >>> splice(array, 1, 2, 0, 0) [2, 3] >>> array [1, 0, 0, 4] .. versionadded:: 2.2.0 .. versionchanged:: 3.0.0 Support string splicing. """ if count is None: count = len(array) - start is_string = pyd.is_string(array) if is_string: # allow reassignment with different type array = list(array) # type: ignore removed = array[start : start + count] del array[start : start + count] for item in reverse(items): array.insert(start, item) if is_string: return "".join(array) # type: ignore else: return removed # type: ignore def split_at(array: t.Sequence[T], index: int) -> t.List[t.Sequence[T]]: """ Returns a list of two lists composed of the split of `array` at `index`. Args: array: List to split. index: Index to split at. Returns: Split list. Example: >>> split_at([1, 2, 3, 4], 2) [[1, 2], [3, 4]] .. versionadded:: 2.0.0 """ return [array[:index], array[index:]] def tail(array: t.Sequence[T]) -> t.Sequence[T]: """ Return all but the first element of `array`. Args: array: List to process. Returns: Rest of the list. Example: >>> tail([1, 2, 3, 4]) [2, 3, 4] .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Renamed from ``rest`` to ``tail``. """ return array[1:] def take(array: t.Sequence[T], n: int = 1) -> t.Sequence[T]: """ Creates a slice of `array` with `n` elements taken from the beginning. Args: array: List to process. n: Number of elements to take. Defaults to ``1``. Returns: Taken list. Example: >>> take([1, 2, 3, 4], 2) [1, 2] .. versionadded:: 1.0.0 .. versionchanged:: 1.1.0 Added ``n`` argument and removed as alias of :func:`first`. .. versionchanged:: 3.0.0 Made ``n`` default to ``1``. """ return take_while(array, lambda _, index: index < n) def take_right(array: t.Sequence[T], n: int = 1) -> t.Sequence[T]: """ Creates a slice of `array` with `n` elements taken from the end. Args: array: List to process. n: Number of elements to take. Defaults to ``1``. Returns: Taken list. Example: >>> take_right([1, 2, 3, 4], 2) [3, 4] .. versionadded:: 1.1.0 .. versionchanged:: 3.0.0 Made ``n`` default to ``1``. """ length = len(array) return take_right_while(array, lambda _, index: (length - index) <= n) @t.overload def take_right_while( array: t.Sequence[T], predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> t.Sequence[T]: ... @t.overload def take_right_while( array: t.Sequence[T], predicate: t.Callable[[T, int], t.Any] ) -> t.Sequence[T]: ... @t.overload def take_right_while(array: t.Sequence[T], predicate: t.Callable[[T], t.Any]) -> t.Sequence[T]: ... @t.overload def take_right_while(array: t.Sequence[T], predicate: None = None) -> t.Sequence[T]: ... def take_right_while(array, predicate=None): """ Creates a slice of `array` with elements taken from the end. Elements are taken until the `predicate` returns falsey. The `predicate` is invoked with three arguments: ``(value, index, array)``. Args: array: List to process. predicate: Predicate called per iteration Returns: Dropped list. Example: >>> take_right_while([1, 2, 3, 4], lambda x: x >= 3) [3, 4] .. 
versionadded:: 1.1.0 """ n = len(array) for is_true, _, _, _ in iteriteratee(array, predicate, reverse=True): if is_true: n -= 1 else: break return array[n:] @t.overload def take_while( array: t.Sequence[T], predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> t.List[T]: ... @t.overload def take_while(array: t.Sequence[T], predicate: t.Callable[[T, int], t.Any]) -> t.List[T]: ... @t.overload def take_while(array: t.Sequence[T], predicate: t.Callable[[T], t.Any]) -> t.List[T]: ... @t.overload def take_while(array: t.Sequence[T], predicate: None = None) -> t.List[T]: ... def take_while(array, predicate=None): """ Creates a slice of `array` with elements taken from the beginning. Elements are taken until the `predicate` returns falsey. The `predicate` is invoked with three arguments: ``(value, index, array)``. Args: array: List to process. predicate: Predicate called per iteration Returns: Taken list. Example: >>> take_while([1, 2, 3, 4], lambda x: x < 3) [1, 2] .. versionadded:: 1.1.0 """ n = 0 for is_true, _, _, _ in iteriteratee(array, predicate): if is_true: n += 1 else: break return array[:n] @t.overload def union(array: t.Sequence[T]) -> t.List[T]: ... @t.overload def union(array: t.Sequence[T], *others: t.Sequence[T2]) -> t.List[t.Union[T, T2]]: ... def union(array, *others): """ Computes the union of the passed-in arrays. Args: array: List to union with. others: Lists to unionize with `array`. Returns: Unionized list. Example: >>> union([1, 2, 3], [2, 3, 4], [3, 4, 5]) [1, 2, 3, 4, 5] .. versionadded:: 1.0.0 """ if not others: return array[:] return uniq(flatten([array] + list(others))) @t.overload def union_by( array: t.Sequence[T], *others: t.Iterable[T], iteratee: t.Callable[[T], t.Any] ) -> t.List[T]: ... @t.overload def union_by( array: t.Sequence[T], *others: t.Union[t.Iterable[T], t.Callable[[T], t.Any]] ) -> t.List[T]: ... def union_by(array, *others, **kwargs): """ This method is similar to :func:`union` except that it accepts iteratee which is invoked for each element of each array to generate the criterion by which uniqueness is computed. Args: array: List to unionize with. others: Lists to unionize with `array`. Keyword Args: iteratee: Function to invoke on each element. Returns: Unionized list. Example: >>> union_by([1, 2, 3], [2, 3, 4], iteratee=lambda x: x % 2) [1, 2] >>> union_by([1, 2, 3], [2, 3, 4], iteratee=lambda x: x % 9) [1, 2, 3, 4] .. versionadded:: 4.0.0 """ if not others: return array[:] iteratee, others = parse_iteratee("iteratee", *others, **kwargs) return uniq_by(flatten([array] + list(others)), iteratee=iteratee) @t.overload def union_with( array: t.Sequence[T], *others: t.Iterable[T2], comparator: t.Callable[[T, T2], t.Any] ) -> t.List[T]: ... @t.overload def union_with( array: t.Sequence[T], *others: t.Union[t.Iterable[T2], t.Callable[[T, T2], t.Any]] ) -> t.List[T]: ... def union_with(array, *others, **kwargs): """ This method is like :func:`union` except that it accepts comparator which is invoked to compare elements of arrays. Result values are chosen from the first array in which the value occurs. Args: array: List to unionize with. others: Lists to unionize with `array`. Keyword Args: comparator: Function to compare the elements of the arrays. Defaults to :func:`.is_equal`. Returns: Unionized list. Example: >>> comparator = lambda a, b: (a % 2) == (b % 2) >>> union_with([1, 2, 3], [2, 3, 4], comparator=comparator) [1, 2] >>> union_with([1, 2, 3], [2, 3, 4]) [1, 2, 3, 4] .. 
versionadded:: 4.0.0 """ if not others: return array[:] comparator, others = parse_iteratee("comparator", *others, **kwargs) return uniq_with(flatten([array] + list(others)), comparator=comparator) def uniq(array: t.Iterable[T]) -> t.List[T]: """ Creates a duplicate-value-free version of the array. If iteratee is passed, each element of array is passed through an iteratee before uniqueness is computed. The iteratee is invoked with three arguments: ``(value, index, array)``. If an object path is passed for iteratee, the created iteratee will return the path value of the given element. If an object is passed for iteratee, the created filter style iteratee will return ``True`` for elements that have the properties of the given object, else ``False``. Args: array: List to process. Returns: Unique list. Example: >>> uniq([1, 2, 3, 1, 2, 3]) [1, 2, 3] .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 - Moved `iteratee` argument to :func:`uniq_by`. - Removed alias ``unique``. """ return uniq_by(array) def uniq_by( array: t.Iterable[T], iteratee: t.Union[t.Callable[[T], t.Any], None] = None ) -> t.List[T]: """ This method is like :func:`uniq` except that it accepts iteratee which is invoked for each element in array to generate the criterion by which uniqueness is computed. The order of result values is determined by the order they occur in the array. The iteratee is invoked with one argument: ``(value)``. Args: array: List to process. iteratee: Function to transform the elements of the arrays. Defaults to :func:`.identity`. Returns: Unique list. Example: >>> uniq_by([1, 2, 3, 1, 2, 3], lambda val: val % 2) [1, 2] .. versionadded:: 4.0.0 """ return list(iterunique(array, iteratee=iteratee)) def uniq_with( array: t.Sequence[T], comparator: t.Union[t.Callable[[T, T], t.Any], None] = None ) -> t.List[T]: """ This method is like :func:`uniq` except that it accepts comparator which is invoked to compare elements of array. The order of result values is determined by the order they occur in the array.The comparator is invoked with two arguments: ``(value, other)``. Args: array: List to process. comparator: Function to compare the elements of the arrays. Defaults to :func:`.is_equal`. Returns: Unique list. Example: >>> uniq_with([1, 2, 3, 4, 5], lambda a, b: (a % 2) == (b % 2)) [1, 2] .. versionadded:: 4.0.0 """ return list(iterunique(array, comparator=comparator)) def unshift(array: t.List[T], *items: T2) -> t.List[t.Union[T, T2]]: """ Insert the given elements at the beginning of `array` and return the modified list. Args: array: List to modify. items: Items to insert. Returns: Modified list. Warning: `array` is modified in place. Example: >>> array = [1, 2, 3, 4] >>> unshift(array, -1, -2) [-1, -2, 1, 2, 3, 4] >>> array [-1, -2, 1, 2, 3, 4] .. versionadded:: 2.2.0 """ for item in reverse(items): array.insert(0, item) # type: ignore return array # type: ignore @t.overload def unzip(array: t.Iterable[t.Tuple[T, T2]]) -> t.List[t.Tuple[T, T2]]: ... @t.overload def unzip(array: t.Iterable[t.Tuple[T, T2, T3]]) -> t.List[t.Tuple[T, T2, T3]]: ... @t.overload def unzip(array: t.Iterable[t.Tuple[T, T2, T3, T4]]) -> t.List[t.Tuple[T, T2, T3, T4]]: ... @t.overload def unzip(array: t.Iterable[t.Tuple[T, T2, T3, T4, T5]]) -> t.List[t.Tuple[T, T2, T3, T4, T5]]: ... @t.overload def unzip(array: t.Iterable[t.Iterable[t.Any]]) -> t.List[t.Tuple[t.Any, ...]]: ... 
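# Editor's note: a small, doctest-style round-trip sketch for ``zip_`` and
# ``unzip`` (defined below); assumes pydash is importable as ``pyd`` and follows
# the examples documented in their docstrings.
#
# >>> import pydash as pyd
# >>> pyd.zip_([1, 2], ["a", "b"])
# [(1, 'a'), (2, 'b')]
# >>> pyd.unzip([(1, 'a'), (2, 'b')])
# [(1, 2), ('a', 'b')]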
def unzip(array): """ The inverse of :func:`zip_`, this method splits groups of elements into tuples composed of elements from each group at their corresponding indexes. Args: array: List to process. Returns: Unzipped list. Example: >>> unzip([(1, 4, 7), (2, 5, 8), (3, 6, 9)]) [(1, 2, 3), (4, 5, 6), (7, 8, 9)] .. versionadded:: 1.0.0 .. versionchanged:: 8.0.0 Support list of tuples instead. """ return zip_(*array) @t.overload def unzip_with( array: t.Iterable[t.Tuple[T, T2]], iteratee: t.Union[ t.Callable[[t.Union[T, T2, T3], t.Union[T, T2], int], T3], t.Callable[[t.Union[T, T2, T3], t.Union[T, T2]], T3], t.Callable[[t.Union[T, T2, T3]], T3], ], ) -> t.List[T3]: ... @t.overload def unzip_with( array: t.Iterable[t.Iterable[t.Any]], iteratee: t.Union[ t.Callable[[t.Any, t.Any, int], T3], t.Callable[[t.Any, t.Any], T3], t.Callable[[t.Any], T3], ], ) -> t.List[T3]: ... @t.overload def unzip_with( array: t.Iterable[t.Iterable[T]], iteratee: None = None, ) -> t.List[t.Tuple[T]]: ... def unzip_with(array, iteratee=None): """ This method is like :func:`unzip` except that it accepts an iteratee to specify how regrouped values should be combined. The iteratee is invoked with three arguments: ``(accumulator, value, index)``. Args: array: List to process. iteratee: Function to combine regrouped values. Returns: Unzipped list. Example: >>> from pydash import add >>> unzip_with([(1, 10, 100), (2, 20, 200)], add) [3, 30, 300] .. versionadded:: 3.3.0 """ if not array: return [] result = unzip(array) if iteratee is None: return result def cbk(group): return pyd.reduce_(group, iteratee) return pyd.map_(result, cbk) def without(array: t.Iterable[T], *values: T) -> t.List[T]: """ Creates an array with all occurrences of the passed values removed. Args: array: List to filter. values: Values to remove. Returns: Filtered list. Example: >>> without([1, 2, 3, 2, 4, 4], 2, 4) [1, 3] .. versionadded:: 1.0.0 """ return [item for item in array if item not in values] def xor(array: t.Iterable[T], *lists: t.Iterable[T]) -> t.List[T]: """ Creates a list that is the symmetric difference of the provided lists. Args: array: List to process. *lists: Lists to xor with. Returns: XOR'd list. Example: >>> xor([1, 3, 4], [1, 2, 4], [2]) [3] .. versionadded:: 1.0.0 """ return xor_by(array, *lists) @t.overload def xor_by( array: t.Iterable[T], *lists: t.Iterable[T], iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT], ) -> t.List[T]: ... @t.overload def xor_by( array: t.Iterable[T], *lists: t.Union[t.Iterable[T], t.Callable[[T], t.Any]] ) -> t.List[T]: ... def xor_by(array, *lists, **kwargs): """ This method is like :func:`xor` except that it accepts iteratee which is invoked for each element of each array to generate the criterion by which they're compared. The order of result values is determined by the order they occur in the arrays. The iteratee is invoked with one argument: ``(value)``. Args: array: List to process. *lists: Lists to xor with. Keyword Args: iteratee: Function to transform the elements of the arrays. Defaults to :func:`.identity`. Returns: XOR'd list. Example: >>> xor_by([2.1, 1.2], [2.3, 3.4], round) [1.2, 3.4] >>> xor_by([{"x": 1}], [{"x": 2}, {"x": 1}], "x") [{'x': 2}] ..
versionadded:: 4.0.0 """ if not lists: return array[:] iteratee, lists = parse_iteratee("iteratee", *lists, **kwargs) return xor( uniq( difference_by( array + lists[0], intersection_by(array, lists[0], iteratee=iteratee), iteratee=iteratee, ) ), *lists[1:], ) @t.overload def xor_with( array: t.Sequence[T], *lists: t.Iterable[T2], comparator: t.Callable[[T, T2], t.Any] ) -> t.List[T]: ... @t.overload def xor_with( array: t.Sequence[T], *lists: t.Union[t.Iterable[T2], t.Callable[[T, T2], t.Any]] ) -> t.List[T]: ... def xor_with(array, *lists, **kwargs): """ This method is like :func:`xor` except that it accepts comparator which is invoked to compare elements of arrays. The order of result values is determined by the order they occur in the arrays. The comparator is invoked with two arguments: ``(arr_val, oth_val)``. Args: array: List to process. *lists: Lists to xor with. Keyword Args: comparator: Function to compare the elements of the arrays. Defaults to :func:`.is_equal`. Returns: XOR'd list. Example: >>> objects = [{"x": 1, "y": 2}, {"x": 2, "y": 1}] >>> others = [{"x": 1, "y": 1}, {"x": 1, "y": 2}] >>> expected = [{"y": 1, "x": 2}, {"y": 1, "x": 1}] >>> xor_with(objects, others, lambda a, b: a == b) == expected True .. versionadded:: 4.0.0 """ if not lists: return array[:] comp, lists = parse_iteratee("comparator", *lists, **kwargs) return xor_with( uniq( difference_with( array + lists[0], intersection_with(array, lists[0], comparator=comp), comparator=comp, ) ), *lists[1:], ) @t.overload def zip_(array1: t.Iterable[T], array2: t.Iterable[T2], /) -> t.List[t.Tuple[T, T2]]: ... @t.overload def zip_( array1: t.Iterable[T], array2: t.Iterable[T2], array3: t.Iterable[T3], / ) -> t.List[t.Tuple[T, T2, T3]]: ... @t.overload def zip_( array1: t.Iterable[T], array2: t.Iterable[T2], array3: t.Iterable[T3], array4: t.Iterable[T4], / ) -> t.List[t.Tuple[T, T2, T3, T4]]: ... @t.overload def zip_( array1: t.Iterable[T], array2: t.Iterable[T2], array3: t.Iterable[T3], array4: t.Iterable[T4], array5: t.Iterable[T5], /, ) -> t.List[t.Tuple[T, T2, T3, T4, T5]]: ... @t.overload def zip_(*arrays: t.Iterable[t.Any]) -> t.List[t.Tuple[t.Any, ...]]: ... def zip_(*arrays): """ Groups the elements of each array at their corresponding indexes. Useful for separate data sources that are coordinated through matching array indexes. Args: arrays: Lists to process. Returns: Zipped list. Example: >>> zip_([1, 2, 3], [4, 5, 6], [7, 8, 9]) [(1, 4, 7), (2, 5, 8), (3, 6, 9)] .. versionadded:: 1.0.0 .. versionchanged:: 8.0.0 Return list of tuples instead of list of lists. """ return list(zip(*arrays)) @t.overload def zip_object(keys: t.Iterable[t.Tuple[T, T2]], values: None = None) -> t.Dict[T, T2]: ... @t.overload def zip_object( keys: t.Iterable[t.List[t.Union[T, T2]]], values: None = None ) -> t.Dict[t.Union[T, T2], t.Union[T, T2]]: ... @t.overload def zip_object(keys: t.Iterable[T], values: t.List[T2]) -> t.Dict[T, T2]: ... def zip_object(keys, values=None): """ Creates a dict composed of lists of keys and values. Pass either a single two-dimensional list, i.e. ``[[key1, value1], [key2, value2]]``, or two lists, one of keys and one of corresponding values. Args: keys: Either a list of keys or a list of ``[key, value]`` pairs. values: List of values to zip. Returns: Zipped dict. Example: >>> zip_object([1, 2, 3], [4, 5, 6]) {1: 4, 2: 5, 3: 6} .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed alias ``object_``. 
""" if values is None: keys, values = unzip(keys) return dict(zip(keys, values)) def zip_object_deep( keys: t.Iterable[t.Any], values: t.Union[t.List[t.Any], None] = None ) -> t.Dict[t.Any, t.Any]: """ This method is like :func:`zip_object` except that it supports property paths. Args: keys: Either a list of keys or a list of ``[key, value]`` pairs. values: List of values to zip. Returns: Zipped dict. Example: >>> expected = {"a": {"b": {"c": 1, "d": 2}}} >>> zip_object_deep(["a.b.c", "a.b.d"], [1, 2]) == expected True .. versionadded:: 4.0.0 """ if values is None: # pragma: no cover keys, values = unzip(keys) obj: t.Dict[t.Any, t.Any] = {} for idx, key in enumerate(keys): obj = pyd.set_(obj, key, pyd.get(values, idx)) return obj @t.overload def zip_with( array1: t.Iterable[T], array2: t.Iterable[T2], *, iteratee: t.Union[ t.Callable[[T, T2, int], T3], t.Callable[[T, T2], T3], t.Callable[[T], T3], ], ) -> t.List[T3]: ... @t.overload def zip_with( *arrays: t.Iterable[t.Any], iteratee: t.Union[ t.Callable[[t.Any, t.Any, int], T2], t.Callable[[t.Any, t.Any], T2], t.Callable[[t.Any], T2], ], ) -> t.List[T2]: ... @t.overload def zip_with( *arrays: t.Union[ t.Iterable[t.Any], t.Callable[[t.Any, t.Any, int], T2], t.Callable[[t.Any, t.Any], T2], t.Callable[[t.Any], T2], ], ) -> t.List[T2]: ... def zip_with(*arrays, **kwargs): """ This method is like :func:`zip` except that it accepts an iteratee to specify how grouped values should be combined. The iteratee is invoked with three arguments: ``(accumulator, value, index)``. Args: *arrays: Lists to process. Keyword Args: iteratee (callable): Function to combine grouped values. Returns: Zipped list of grouped elements. Example: >>> from pydash import add >>> zip_with([1, 2], [10, 20], [100, 200], add) [111, 222] >>> zip_with([1, 2], [10, 20], [100, 200], iteratee=add) [111, 222] .. 
versionadded:: 3.3.0 """ if "iteratee" in kwargs: iteratee = kwargs["iteratee"] elif len(arrays) > 1: iteratee = arrays[-1] arrays = arrays[:-1] else: iteratee = None return unzip_with(arrays, iteratee) # # Utility methods not a part of the main API # def iterflatten(array, depth=-1): """Iteratively flatten a list shallowly or deeply.""" for item in array: if isinstance(item, (list, tuple)) and depth != 0: for subitem in iterflatten(item, depth - 1): yield subitem else: yield item def iterinterleave(*arrays): """Interleave multiple lists.""" iters = [iter(arr) for arr in arrays] while iters: nextiters = [] for itr in iters: try: yield next(itr) nextiters.append(itr) except StopIteration: pass iters = nextiters def iterintersperse(iterable, separator): """Iteratively intersperse iterable.""" iterable = iter(iterable) yield next(iterable) for item in iterable: yield separator yield item def iterunique(array, comparator=None, iteratee=None): # noqa: PLR0912 """Yield each unique item in array.""" if not array: # pragma: no cover return if iteratee is not None: iteratee = pyd.iteratee(iteratee) seen_hashable = set() seen_unhashable = [] for item in array: if iteratee is None: cmp_item = item else: cmp_item = iteratee(item) if comparator is None: try: if cmp_item not in seen_hashable: yield item seen_hashable.add(cmp_item) except TypeError: if cmp_item not in seen_unhashable: yield item seen_unhashable.append(cmp_item) else: unseen = True for seen_item in seen_unhashable: if comparator(cmp_item, seen_item): unseen = False break if unseen: yield item seen_unhashable.append(cmp_item) def iterduplicates(array): """Yield duplicates found in `array`.""" seen = [] for i, item in enumerate(array): if item in seen: yield i, item else: seen.append(item) def iterintersection(array, other, comparator=None, iteratee=None): """Yield intersecting values between `array` and `other` using `comparator` to determine if they intersect.""" if not array or not other: # pragma: no cover return if comparator is None: comparator = pyd.is_equal iteratee = pyd.iteratee(iteratee) # NOTE: Maintain ordering of yielded values based on `array` ordering.
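# Editor's note: the loop below walks `array` first, which is what keeps result
# ordering tied to the first list. Illustrative doctest-style sketch (assumes
# pydash is importable as ``pyd``):
#
# >>> import pydash as pyd
# >>> pyd.intersection([1, 2, 3], [3, 2, 1])
# [1, 2, 3]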
seen = [] for item in array: cmp_item = iteratee(item) if cmp_item in seen: continue seen.append(cmp_item) seen_others = [] for value in other: cmp_value = iteratee(value) if cmp_value in seen_others: continue seen_others.append(cmp_value) if comparator(cmp_item, cmp_value): yield item break def iterdifference(array, other, comparator=None, iteratee=None): """Yield different values in `array` as compared to `other` using `comparator` to determine if they are different.""" if not array or not other: # pragma: no cover return if comparator is None: comparator = pyd.is_equal iteratee = pyd.iteratee(iteratee) def is_different(item, seen): is_diff = True if item not in seen: for value in other: if comparator(iteratee(item), iteratee(value)): is_diff = False break if is_diff: seen.append(item) return is_diff seen = [] not_seen = [] for item in array: if item in not_seen or is_different(item, seen): yield item pydash-8.0.3/src/pydash/chaining/000077500000000000000000000000001464745015500166645ustar00rootroot00000000000000pydash-8.0.3/src/pydash/chaining/__init__.py000066400000000000000000000001351464745015500207740ustar00rootroot00000000000000from .chaining import _Dash, chain, tap __all__ = ( "_Dash", "chain", "tap", ) pydash-8.0.3/src/pydash/chaining/all_funcs.py000066400000000000000000000025241464745015500212070ustar00rootroot00000000000000from abc import ABC, abstractmethod import typing as t class AllFuncs(ABC): """Exposing all of the exposed functions of a module through a class.""" module: t.Any invalid_method_exception: t.Type[Exception] @abstractmethod def _wrap(self, func) -> t.Callable: """Proxy attribute access to :attr:`module`.""" raise NotImplementedError() # pragma: no cover @classmethod def get_method(cls, name: str) -> t.Callable: """ Return valid :attr:`module` method. Args: name: Name of pydash method to get. Returns: :attr:`module` callable. Raises: InvalidMethod: Raised if `name` is not a valid :attr:`module` method. """ method = getattr(cls.module, name, None) if not callable(method) and not name.endswith("_"): # Alias method names not ending in underscore to their underscore # counterpart. This allows chaining of functions like "map_()" # using "map()" instead.
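# Editor's note: concretely, an attribute lookup such as ``chain([1, 2, 3]).map``
# falls through to the underscore-suffixed ``pydash.map_`` via the fallback
# below. Doctest-style sketch (assumes ``pydash.chain`` and ``Chain.value()``
# from this package's public chaining API):
#
# >>> import pydash as pyd
# >>> pyd.chain([1, 2, 3]).map(lambda x: x * 2).value()
# [2, 4, 6]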
method = getattr(cls.module, name + "_", None) if not callable(method): raise cls.invalid_method_exception(f"Invalid {cls.module.__name__} method: {name}") return method def __getattr__(self, name: str) -> t.Callable: return self._wrap(self.get_method(name)) pydash-8.0.3/src/pydash/chaining/all_funcs.pyi000066400000000000000000004006731464745015500213670ustar00rootroot00000000000000# mypy: disable-error-code=misc """Generated from the `scripts/chaining_type_generator.py` script.""" import re import typing as t from _typeshed import ( SupportsAdd, SupportsDunderGE, SupportsDunderGT, SupportsDunderLE, SupportsDunderLT, SupportsRichComparison, SupportsRichComparisonT, SupportsSub, ) from typing_extensions import Concatenate, Literal, ParamSpec, Type import pydash as pyd from pydash.chaining.chaining import Chain from pydash.functions import ( After, Ary, Before, CurryFive, CurryFour, CurryOne, CurryRightFive, CurryRightFour, CurryRightOne, CurryRightThree, CurryRightTwo, CurryThree, CurryTwo, Debounce, Disjoin, Flow, Iterated, Juxtapose, Negate, Once, Partial, Rearg, Spread, Throttle, ) from pydash.helpers import UNSET, Unset from pydash.types import * from pydash.utilities import MemoizedFunc ValueT_co = t.TypeVar("ValueT_co", covariant=True) T = t.TypeVar("T") T1 = t.TypeVar("T1") T2 = t.TypeVar("T2") T3 = t.TypeVar("T3") T4 = t.TypeVar("T4") T5 = t.TypeVar("T5") NumT = t.TypeVar("NumT", int, float, "Decimal") NumT2 = t.TypeVar("NumT2", int, float, "Decimal") NumT3 = t.TypeVar("NumT3", int, float, "Decimal") CallableT = t.TypeVar("CallableT", bound=t.Callable[..., t.Any]) SequenceT = t.TypeVar("SequenceT", bound=t.Sequence[t.Any]) MutableSequenceT = t.TypeVar("MutableSequenceT", bound=t.MutableSequence[t.Any]) P = ParamSpec("P") class AllFuncs: def chunk(self: "Chain[t.Sequence[T]]", size: int = 1) -> "Chain[t.List[t.Sequence[T]]]": return self._wrap(pyd.chunk)(size) def compact(self: "Chain[t.Iterable[t.Union[T, None]]]") -> "Chain[t.List[T]]": return self._wrap(pyd.compact)() def concat(self: "Chain[t.Iterable[T]]", *arrays: t.Iterable[T]) -> "Chain[t.List[T]]": return self._wrap(pyd.concat)(*arrays) def difference(self: "Chain[t.Iterable[T]]", *others: t.Iterable[T]) -> "Chain[t.List[T]]": return self._wrap(pyd.difference)(*others) @t.overload def difference_by( self: "Chain[t.Iterable[T]]", *others: t.Iterable[T], iteratee: t.Union[IterateeObjT, t.Callable[[T], t.Any], None], ) -> "Chain[t.List[T]]": ... @t.overload def difference_by( self: "Chain[t.Iterable[T]]", *others: t.Union[IterateeObjT, t.Iterable[T], t.Callable[[T], t.Any]], ) -> "Chain[t.List[T]]": ... def difference_by(self, *others, **kwargs): return self._wrap(pyd.difference_by)(*others, **kwargs) @t.overload def difference_with( self: "Chain[t.Iterable[T]]", *others: t.Iterable[T2], comparator: t.Union[t.Callable[[T, T2], t.Any], None], ) -> "Chain[t.List[T]]": ... @t.overload def difference_with( self: "Chain[t.Iterable[T]]", *others: t.Union[t.Iterable[T2], t.Callable[[T, T2], t.Any]] ) -> "Chain[t.List[T]]": ... def difference_with(self, *others, **kwargs): return self._wrap(pyd.difference_with)(*others, **kwargs) def drop(self: "Chain[t.Sequence[T]]", n: int = 1) -> "Chain[t.List[T]]": return self._wrap(pyd.drop)(n) def drop_right(self: "Chain[t.Sequence[T]]", n: int = 1) -> "Chain[t.List[T]]": return self._wrap(pyd.drop_right)(n) @t.overload def drop_right_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[t.List[T]]": ... 
@t.overload def drop_right_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T, int], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def drop_right_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def drop_right_while( self: "Chain[t.Sequence[T]]", predicate: None = None ) -> "Chain[t.List[T]]": ... def drop_right_while(self, predicate=None): return self._wrap(pyd.drop_right_while)(predicate) @t.overload def drop_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def drop_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T, int], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def drop_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def drop_while(self: "Chain[t.Sequence[T]]", predicate: None = None) -> "Chain[t.List[T]]": ... def drop_while(self, predicate=None): return self._wrap(pyd.drop_while)(predicate) def duplicates( self: "Chain[t.Sequence[T]]", iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T]]": return self._wrap(pyd.duplicates)(iteratee) def fill( self: "Chain[t.Sequence[T]]", value: T2, start: int = 0, end: t.Union[int, None] = None ) -> "Chain[t.List[t.Union[T, T2]]]": return self._wrap(pyd.fill)(value, start, end) @t.overload def find_index( self: "Chain[t.Iterable[T]]", predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[int]": ... @t.overload def find_index( self: "Chain[t.Iterable[T]]", predicate: t.Callable[[T, int], t.Any] ) -> "Chain[int]": ... @t.overload def find_index( self: "Chain[t.Iterable[T]]", predicate: t.Callable[[T], t.Any] ) -> "Chain[int]": ... @t.overload def find_index(self: "Chain[t.Iterable[t.Any]]", predicate: IterateeObjT) -> "Chain[int]": ... @t.overload def find_index(self: "Chain[t.Iterable[t.Any]]", predicate: None = None) -> "Chain[int]": ... def find_index(self, predicate=None): return self._wrap(pyd.find_index)(predicate) @t.overload def find_last_index( self: "Chain[t.Iterable[T]]", predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[int]": ... @t.overload def find_last_index( self: "Chain[t.Iterable[T]]", predicate: t.Callable[[T, int], t.Any] ) -> "Chain[int]": ... @t.overload def find_last_index( self: "Chain[t.Iterable[T]]", predicate: t.Callable[[T], t.Any] ) -> "Chain[int]": ... @t.overload def find_last_index( self: "Chain[t.Iterable[t.Any]]", predicate: IterateeObjT ) -> "Chain[int]": ... @t.overload def find_last_index( self: "Chain[t.Iterable[t.Any]]", predicate: None = None ) -> "Chain[int]": ... def find_last_index(self, predicate=None): return self._wrap(pyd.find_last_index)(predicate) @t.overload def flatten(self: "Chain[t.Iterable[t.Iterable[T]]]") -> "Chain[t.List[T]]": ... @t.overload def flatten(self: "Chain[t.Iterable[T]]") -> "Chain[t.List[T]]": ... def flatten(self): return self._wrap(pyd.flatten)() def flatten_deep(self: "Chain[t.Iterable[t.Any]]") -> "Chain[t.List[t.Any]]": return self._wrap(pyd.flatten_deep)() def flatten_depth(self: "Chain[t.Iterable[t.Any]]", depth: int = 1) -> "Chain[t.List[t.Any]]": return self._wrap(pyd.flatten_depth)(depth) @t.overload def from_pairs(self: "Chain[t.Iterable[t.Tuple[T, T2]]]") -> "Chain[t.Dict[T, T2]]": ... @t.overload def from_pairs( self: "Chain[t.Iterable[t.List[t.Union[T, T2]]]]", ) -> "Chain[t.Dict[t.Union[T, T2], t.Union[T, T2]]]": ... 
def from_pairs(self): return self._wrap(pyd.from_pairs)() def head(self: "Chain[t.Sequence[T]]") -> "Chain[t.Union[T, None]]": return self._wrap(pyd.head)() def index_of(self: "Chain[t.Sequence[T]]", value: T, from_index: int = 0) -> "Chain[int]": return self._wrap(pyd.index_of)(value, from_index) def initial(self: "Chain[t.Sequence[T]]") -> "Chain[t.Sequence[T]]": return self._wrap(pyd.initial)() @t.overload def intercalate( self: "Chain[t.Iterable[t.Iterable[T]]]", separator: T2 ) -> "Chain[t.List[t.Union[T, T2]]]": ... @t.overload def intercalate( self: "Chain[t.Iterable[T]]", separator: T2 ) -> "Chain[t.List[t.Union[T, T2]]]": ... def intercalate(self, separator): return self._wrap(pyd.intercalate)(separator) def interleave(self: "Chain[t.Iterable[T]]", *arrays: t.Iterable[T]) -> "Chain[t.List[T]]": return self._wrap(pyd.interleave)(*arrays) def intersection( self: "Chain[t.Sequence[T]]", *others: t.Iterable[t.Any] ) -> "Chain[t.List[T]]": return self._wrap(pyd.intersection)(*others) @t.overload def intersection_by( self: "Chain[t.Sequence[T]]", *others: t.Iterable[t.Any], iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT], ) -> "Chain[t.List[T]]": ... @t.overload def intersection_by( self: "Chain[t.Sequence[T]]", *others: t.Union[t.Iterable[t.Any], t.Callable[[T], t.Any], IterateeObjT], ) -> "Chain[t.List[T]]": ... def intersection_by(self, *others, **kwargs): return self._wrap(pyd.intersection_by)(*others, **kwargs) @t.overload def intersection_with( self: "Chain[t.Sequence[T]]", *others: t.Iterable[T2], comparator: t.Callable[[T, T2], t.Any], ) -> "Chain[t.List[T]]": ... @t.overload def intersection_with( self: "Chain[t.Sequence[T]]", *others: t.Union[t.Iterable[T2], t.Callable[[T, T2], t.Any]] ) -> "Chain[t.List[T]]": ... def intersection_with(self, *others, **kwargs): return self._wrap(pyd.intersection_with)(*others, **kwargs) def intersperse(self: "Chain[t.Iterable[T]]", separator: T2) -> "Chain[t.List[t.Union[T, T2]]]": return self._wrap(pyd.intersperse)(separator) def last(self: "Chain[t.Sequence[T]]") -> "Chain[t.Union[T, None]]": return self._wrap(pyd.last)() def last_index_of( self: "Chain[t.Sequence[t.Any]]", value: t.Any, from_index: t.Union[int, None] = None ) -> "Chain[int]": return self._wrap(pyd.last_index_of)(value, from_index) @t.overload def mapcat( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], t.Union[t.List[T2], t.List[t.List[T2]]]], ) -> "Chain[t.List[T2]]": ... @t.overload def mapcat( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> "Chain[t.List[T2]]": ... @t.overload def mapcat( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], t.Union[t.List[T2], t.List[t.List[T2]]]], ) -> "Chain[t.List[T2]]": ... @t.overload def mapcat( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], T2] ) -> "Chain[t.List[T2]]": ... @t.overload def mapcat( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], t.Union[t.List[T2], t.List[t.List[T2]]]], ) -> "Chain[t.List[T2]]": ... @t.overload def mapcat( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], T2] ) -> "Chain[t.List[T2]]": ... @t.overload def mapcat( self: "Chain[t.Iterable[t.Union[t.List[T], t.List[t.List[T]]]]]", iteratee: None = None ) -> "Chain[t.List[t.Union[T, t.List[T]]]]": ... 
def mapcat(self, iteratee=None): return self._wrap(pyd.mapcat)(iteratee) def nth(self: "Chain[t.Iterable[T]]", pos: int = 0) -> "Chain[t.Union[T, None]]": return self._wrap(pyd.nth)(pos) def pop(self: "Chain[t.List[T]]", index: int = -1) -> "Chain[T]": return self._wrap(pyd.pop)(index) def pull(self: "Chain[t.List[T]]", *values: T) -> "Chain[t.List[T]]": return self._wrap(pyd.pull)(*values) def pull_all(self: "Chain[t.List[T]]", values: t.Iterable[T]) -> "Chain[t.List[T]]": return self._wrap(pyd.pull_all)(values) def pull_all_by( self: "Chain[t.List[T]]", values: t.Iterable[T], iteratee: t.Union[IterateeObjT, t.Callable[[T], t.Any], None] = None, ) -> "Chain[t.List[T]]": return self._wrap(pyd.pull_all_by)(values, iteratee) def pull_all_with( self: "Chain[t.List[T]]", values: t.Iterable[T], comparator: t.Union[t.Callable[[T, T], t.Any], None] = None, ) -> "Chain[t.List[T]]": return self._wrap(pyd.pull_all_with)(values, comparator) def pull_at(self: "Chain[t.List[T]]", *indexes: int) -> "Chain[t.List[T]]": return self._wrap(pyd.pull_at)(*indexes) def push(self: "Chain[t.List[T]]", *items: T2) -> "Chain[t.List[t.Union[T, T2]]]": return self._wrap(pyd.push)(*items) def remove( self: "Chain[t.List[T]]", predicate: t.Union[ t.Callable[[T, int, t.List[T]], t.Any], t.Callable[[T, int], t.Any], t.Callable[[T], t.Any], None, ] = None, ) -> "Chain[t.List[T]]": return self._wrap(pyd.remove)(predicate) def reverse(self: "Chain[SequenceT]") -> "Chain[SequenceT]": return self._wrap(pyd.reverse)() def shift(self: "Chain[t.List[T]]") -> "Chain[T]": return self._wrap(pyd.shift)() def slice_( self: "Chain[SequenceT]", start: int = 0, end: t.Union[int, None] = None ) -> "Chain[SequenceT]": return self._wrap(pyd.slice_)(start, end) slice = slice_ @t.overload def sort( self: "Chain[t.List['SupportsRichComparisonT']]", comparator: None = None, key: None = None, reverse: bool = False, ) -> "Chain[t.List['SupportsRichComparisonT']]": ... @t.overload def sort( self: "Chain[t.List[T]]", comparator: t.Callable[[T, T], int], *, reverse: bool = False ) -> "Chain[t.List[T]]": ... @t.overload def sort( self: "Chain[t.List[T]]", *, key: t.Callable[[T], "SupportsRichComparisonT"], reverse: bool = False, ) -> "Chain[t.List[T]]": ... def sort(self, comparator=None, key=None, reverse=False): return self._wrap(pyd.sort)(comparator, key, reverse) def sorted_index( self: "Chain[t.Sequence['SupportsRichComparisonT']]", value: "SupportsRichComparisonT" ) -> "Chain[int]": return self._wrap(pyd.sorted_index)(value) @t.overload def sorted_index_by( self: "Chain[t.Sequence[T]]", value: T, iteratee: t.Union[IterateeObjT, t.Callable[[T], "SupportsRichComparisonT"]], ) -> "Chain[int]": ... @t.overload def sorted_index_by( self: "Chain[t.Sequence['SupportsRichComparisonT']]", value: "SupportsRichComparisonT", iteratee: None = None, ) -> "Chain[int]": ... def sorted_index_by(self, value, iteratee=None): return self._wrap(pyd.sorted_index_by)(value, iteratee) def sorted_index_of( self: "Chain[t.Sequence['SupportsRichComparisonT']]", value: "SupportsRichComparisonT" ) -> "Chain[int]": return self._wrap(pyd.sorted_index_of)(value) def sorted_last_index( self: "Chain[t.Sequence['SupportsRichComparisonT']]", value: "SupportsRichComparisonT" ) -> "Chain[int]": return self._wrap(pyd.sorted_last_index)(value) @t.overload def sorted_last_index_by( self: "Chain[t.Sequence[T]]", value: T, iteratee: t.Union[IterateeObjT, t.Callable[[T], "SupportsRichComparisonT"]], ) -> "Chain[int]": ... 
@t.overload def sorted_last_index_by( self: "Chain[t.Sequence['SupportsRichComparisonT']]", value: "SupportsRichComparisonT", iteratee: None = None, ) -> "Chain[int]": ... def sorted_last_index_by(self, value, iteratee=None): return self._wrap(pyd.sorted_last_index_by)(value, iteratee) def sorted_last_index_of( self: "Chain[t.Sequence['SupportsRichComparisonT']]", value: "SupportsRichComparisonT" ) -> "Chain[int]": return self._wrap(pyd.sorted_last_index_of)(value) def sorted_uniq( self: "Chain[t.Iterable['SupportsRichComparisonT']]", ) -> "Chain[t.List['SupportsRichComparisonT']]": return self._wrap(pyd.sorted_uniq)() def sorted_uniq_by( self: "Chain[t.Iterable['SupportsRichComparisonT']]", iteratee: t.Union[ t.Callable[["SupportsRichComparisonT"], "SupportsRichComparisonT"], None ] = None, ) -> "Chain[t.List['SupportsRichComparisonT']]": return self._wrap(pyd.sorted_uniq_by)(iteratee) def splice( self: "Chain[MutableSequenceT]", start: int, count: t.Union[int, None] = None, *items: t.Any ) -> "Chain[MutableSequenceT]": return self._wrap(pyd.splice)(start, count, *items) def split_at(self: "Chain[t.Sequence[T]]", index: int) -> "Chain[t.List[t.Sequence[T]]]": return self._wrap(pyd.split_at)(index) def tail(self: "Chain[t.Sequence[T]]") -> "Chain[t.Sequence[T]]": return self._wrap(pyd.tail)() def take(self: "Chain[t.Sequence[T]]", n: int = 1) -> "Chain[t.Sequence[T]]": return self._wrap(pyd.take)(n) def take_right(self: "Chain[t.Sequence[T]]", n: int = 1) -> "Chain[t.Sequence[T]]": return self._wrap(pyd.take_right)(n) @t.overload def take_right_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[t.Sequence[T]]": ... @t.overload def take_right_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T, int], t.Any] ) -> "Chain[t.Sequence[T]]": ... @t.overload def take_right_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T], t.Any] ) -> "Chain[t.Sequence[T]]": ... @t.overload def take_right_while( self: "Chain[t.Sequence[T]]", predicate: None = None ) -> "Chain[t.Sequence[T]]": ... def take_right_while(self, predicate=None): return self._wrap(pyd.take_right_while)(predicate) @t.overload def take_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def take_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T, int], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def take_while( self: "Chain[t.Sequence[T]]", predicate: t.Callable[[T], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def take_while(self: "Chain[t.Sequence[T]]", predicate: None = None) -> "Chain[t.List[T]]": ... def take_while(self, predicate=None): return self._wrap(pyd.take_while)(predicate) @t.overload def union(self: "Chain[t.Sequence[T]]") -> "Chain[t.List[T]]": ... @t.overload def union( self: "Chain[t.Sequence[T]]", *others: t.Sequence[T2] ) -> "Chain[t.List[t.Union[T, T2]]]": ... def union(self, *others): return self._wrap(pyd.union)(*others) @t.overload def union_by( self: "Chain[t.Sequence[T]]", *others: t.Iterable[T], iteratee: t.Callable[[T], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def union_by( self: "Chain[t.Sequence[T]]", *others: t.Union[t.Iterable[T], t.Callable[[T], t.Any]] ) -> "Chain[t.List[T]]": ... def union_by(self, *others, **kwargs): return self._wrap(pyd.union_by)(*others, **kwargs) @t.overload def union_with( self: "Chain[t.Sequence[T]]", *others: t.Iterable[T2], comparator: t.Callable[[T, T2], t.Any], ) -> "Chain[t.List[T]]": ... 
@t.overload def union_with( self: "Chain[t.Sequence[T]]", *others: t.Union[t.Iterable[T2], t.Callable[[T, T2], t.Any]] ) -> "Chain[t.List[T]]": ... def union_with(self, *others, **kwargs): return self._wrap(pyd.union_with)(*others, **kwargs) def uniq(self: "Chain[t.Iterable[T]]") -> "Chain[t.List[T]]": return self._wrap(pyd.uniq)() def uniq_by( self: "Chain[t.Iterable[T]]", iteratee: t.Union[t.Callable[[T], t.Any], None] = None ) -> "Chain[t.List[T]]": return self._wrap(pyd.uniq_by)(iteratee) def uniq_with( self: "Chain[t.Sequence[T]]", comparator: t.Union[t.Callable[[T, T], t.Any], None] = None ) -> "Chain[t.List[T]]": return self._wrap(pyd.uniq_with)(comparator) def unshift(self: "Chain[t.List[T]]", *items: T2) -> "Chain[t.List[t.Union[T, T2]]]": return self._wrap(pyd.unshift)(*items) @t.overload def unzip(self: "Chain[t.Iterable[t.Tuple[T, T2]]]") -> "Chain[t.List[t.Tuple[T, T2]]]": ... @t.overload def unzip( self: "Chain[t.Iterable[t.Tuple[T, T2, T3]]]", ) -> "Chain[t.List[t.Tuple[T, T2, T3]]]": ... @t.overload def unzip( self: "Chain[t.Iterable[t.Tuple[T, T2, T3, T4]]]", ) -> "Chain[t.List[t.Tuple[T, T2, T3, T4]]]": ... @t.overload def unzip( self: "Chain[t.Iterable[t.Tuple[T, T2, T3, T4, T5]]]", ) -> "Chain[t.List[t.Tuple[T, T2, T3, T4, T5]]]": ... @t.overload def unzip( self: "Chain[t.Iterable[t.Iterable[t.Any]]]", ) -> "Chain[t.List[t.Tuple[t.Any, ...]]]": ... def unzip(self): return self._wrap(pyd.unzip)() @t.overload def unzip_with( self: "Chain[t.Iterable[t.Tuple[T, T2]]]", iteratee: t.Union[ t.Callable[[t.Union[T, T2, T3], t.Union[T, T2], int], T3], t.Callable[[t.Union[T, T2, T3], t.Union[T, T2]], T3], t.Callable[[t.Union[T, T2, T3]], T3], ], ) -> "Chain[t.List[T3]]": ... @t.overload def unzip_with( self: "Chain[t.Iterable[t.Iterable[t.Any]]]", iteratee: t.Union[ t.Callable[[t.Any, t.Any, int], T3], t.Callable[[t.Any, t.Any], T3], t.Callable[[t.Any], T3], ], ) -> "Chain[t.List[T3]]": ... @t.overload def unzip_with( self: "Chain[t.Iterable[t.Iterable[T]]]", iteratee: None = None ) -> "Chain[t.List[t.Tuple[T]]]": ... def unzip_with(self, iteratee=None): return self._wrap(pyd.unzip_with)(iteratee) def without(self: "Chain[t.Iterable[T]]", *values: T) -> "Chain[t.List[T]]": return self._wrap(pyd.without)(*values) def xor(self: "Chain[t.Iterable[T]]", *lists: t.Iterable[T]) -> "Chain[t.List[T]]": return self._wrap(pyd.xor)(*lists) @t.overload def xor_by( self: "Chain[t.Iterable[T]]", *lists: t.Iterable[T], iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT], ) -> "Chain[t.List[T]]": ... @t.overload def xor_by( self: "Chain[t.Iterable[T]]", *lists: t.Union[t.Iterable[T], t.Callable[[T], t.Any]] ) -> "Chain[t.List[T]]": ... def xor_by(self, *lists, **kwargs): return self._wrap(pyd.xor_by)(*lists, **kwargs) @t.overload def xor_with( self: "Chain[t.Sequence[T]]", *lists: t.Iterable[T2], comparator: t.Callable[[T, T2], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def xor_with( self: "Chain[t.Sequence[T]]", *lists: t.Union[t.Iterable[T2], t.Callable[[T, T2], t.Any]] ) -> "Chain[t.List[T]]": ... def xor_with(self, *lists, **kwargs): return self._wrap(pyd.xor_with)(*lists, **kwargs) @t.overload def zip_( self: "Chain[t.Iterable[t.Any]]", *arrays: t.Iterable[t.Any] ) -> "Chain[t.List[t.Tuple[t.Any, ...]]]": ... def zip_(self, *arrays): return self._wrap(pyd.zip_)(*arrays) zip = zip_ @t.overload def zip_object( self: "Chain[t.Iterable[t.Tuple[T, T2]]]", values: None = None ) -> "Chain[t.Dict[T, T2]]": ... 
@t.overload def zip_object( self: "Chain[t.Iterable[t.List[t.Union[T, T2]]]]", values: None = None ) -> "Chain[t.Dict[t.Union[T, T2], t.Union[T, T2]]]": ... @t.overload def zip_object(self: "Chain[t.Iterable[T]]", values: t.List[T2]) -> "Chain[t.Dict[T, T2]]": ... def zip_object(self, values=None): return self._wrap(pyd.zip_object)(values) def zip_object_deep( self: "Chain[t.Iterable[t.Any]]", values: t.Union[t.List[t.Any], None] = None ) -> "Chain[t.Dict[t.Any, t.Any]]": return self._wrap(pyd.zip_object_deep)(values) @t.overload def zip_with( self: "Chain[t.Iterable[T]]", array2: t.Iterable[T2], *, iteratee: t.Union[ t.Callable[[T, T2, int], T3], t.Callable[[T, T2], T3], t.Callable[[T], T3] ], ) -> "Chain[t.List[T3]]": ... @t.overload def zip_with( self: "Chain[t.Iterable[t.Any]]", *arrays: t.Iterable[t.Any], iteratee: t.Union[ t.Callable[[t.Any, t.Any, int], T2], t.Callable[[t.Any, t.Any], T2], t.Callable[[t.Any], T2], ], ) -> "Chain[t.List[T2]]": ... @t.overload def zip_with( self: "Chain[t.Union[t.Iterable[t.Any], t.Callable[[t.Any, t.Any, int], T2], t.Callable[[t.Any, t.Any], T2], t.Callable[[t.Any], T2]]]", *arrays: t.Union[ t.Iterable[t.Any], t.Callable[[t.Any, t.Any, int], T2], t.Callable[[t.Any, t.Any], T2], t.Callable[[t.Any], T2], ], ) -> "Chain[t.List[T2]]": ... def zip_with(self, *arrays, **kwargs): return self._wrap(pyd.zip_with)(*arrays, **kwargs) def tap(self: "Chain[T]", interceptor: t.Callable[[T], t.Any]) -> "Chain[T]": return self._wrap(pyd.tap)(interceptor) @t.overload def at(self: "Chain[t.Mapping[T, T2]]", *paths: T) -> "Chain[t.List[t.Union[T2, None]]]": ... @t.overload def at( self: "Chain[t.Mapping[T, t.Any]]", *paths: t.Union[T, t.Iterable[T]] ) -> "Chain[t.List[t.Any]]": ... @t.overload def at(self: "Chain[t.Iterable[T]]", *paths: int) -> "Chain[t.List[t.Union[T, None]]]": ... @t.overload def at( self: "Chain[t.Iterable[t.Any]]", *paths: t.Union[int, t.Iterable[int]] ) -> "Chain[t.List[t.Any]]": ... def at(self, *paths): return self._wrap(pyd.at)(*paths) @t.overload def count_by( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: None = None ) -> "Chain[t.Dict[T2, int]]": ... @t.overload def count_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], T3] ) -> "Chain[t.Dict[T3, int]]": ... @t.overload def count_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], T3] ) -> "Chain[t.Dict[T3, int]]": ... @t.overload def count_by( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], T3] ) -> "Chain[t.Dict[T3, int]]": ... @t.overload def count_by( self: "Chain[t.Iterable[T]]", iteratee: None = None ) -> "Chain[t.Dict[T, int]]": ... @t.overload def count_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> "Chain[t.Dict[T2, int]]": ... @t.overload def count_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], T2] ) -> "Chain[t.Dict[T2, int]]": ... @t.overload def count_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], T2] ) -> "Chain[t.Dict[T2, int]]": ... def count_by(self, iteratee=None): return self._wrap(pyd.count_by)(iteratee) def every( self: "Chain[t.Iterable[T]]", predicate: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> "Chain[bool]": return self._wrap(pyd.every)(predicate) @t.overload def filter_( self: "Chain[t.Mapping[T, T2]]", predicate: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T2]]": ... 
@t.overload def filter_( self: "Chain[t.Mapping[T, T2]]", predicate: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T2]]": ... @t.overload def filter_( self: "Chain[t.Mapping[t.Any, T2]]", predicate: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T2]]": ... @t.overload def filter_( self: "Chain[t.Iterable[T]]", predicate: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T]]": ... @t.overload def filter_( self: "Chain[t.Iterable[T]]", predicate: t.Union[t.Callable[[T, int], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T]]": ... @t.overload def filter_( self: "Chain[t.Iterable[T]]", predicate: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T]]": ... def filter_(self, predicate=None): return self._wrap(pyd.filter_)(predicate) filter = filter_ @t.overload def find( self: "Chain[t.Dict[T, T2]]", predicate: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T2, None]]": ... @t.overload def find( self: "Chain[t.Dict[T, T2]]", predicate: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T2, None]]": ... @t.overload def find( self: "Chain[t.Dict[T, T2]]", predicate: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T2, None]]": ... @t.overload def find( self: "Chain[t.List[T]]", predicate: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T, None]]": ... @t.overload def find( self: "Chain[t.List[T]]", predicate: t.Union[t.Callable[[T, int], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T, None]]": ... @t.overload def find( self: "Chain[t.List[T]]", predicate: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T, None]]": ... def find(self, predicate=None): return self._wrap(pyd.find)(predicate) @t.overload def find_last( self: "Chain[t.Dict[T, T2]]", predicate: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T2, None]]": ... @t.overload def find_last( self: "Chain[t.Dict[T, T2]]", predicate: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T2, None]]": ... @t.overload def find_last( self: "Chain[t.Dict[t.Any, T2]]", predicate: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T2, None]]": ... @t.overload def find_last( self: "Chain[t.List[T]]", predicate: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T, None]]": ... @t.overload def find_last( self: "Chain[t.List[T]]", predicate: t.Union[t.Callable[[T, int], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T, None]]": ... @t.overload def find_last( self: "Chain[t.List[T]]", predicate: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Union[T, None]]": ... def find_last(self, predicate=None): return self._wrap(pyd.find_last)(predicate) @t.overload def flat_map( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], t.Iterable[T3]], ) -> "Chain[t.List[T3]]": ... @t.overload def flat_map( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], t.Iterable[T3]] ) -> "Chain[t.List[T3]]": ... @t.overload def flat_map( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], t.Iterable[T3]] ) -> "Chain[t.List[T3]]": ... 
@t.overload def flat_map( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], T3] ) -> "Chain[t.List[T3]]": ... @t.overload def flat_map( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], T3] ) -> "Chain[t.List[T3]]": ... @t.overload def flat_map( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], T3] ) -> "Chain[t.List[T3]]": ... @t.overload def flat_map( self: "Chain[t.Mapping[t.Any, t.Iterable[T2]]]", iteratee: None = None ) -> "Chain[t.List[T2]]": ... @t.overload def flat_map( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: None = None ) -> "Chain[t.List[T2]]": ... @t.overload def flat_map( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], t.Iterable[T2]] ) -> "Chain[t.List[T2]]": ... @t.overload def flat_map( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], t.Iterable[T2]] ) -> "Chain[t.List[T2]]": ... @t.overload def flat_map( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], t.Iterable[T2]] ) -> "Chain[t.List[T2]]": ... @t.overload def flat_map( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> "Chain[t.List[T2]]": ... @t.overload def flat_map( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], T2] ) -> "Chain[t.List[T2]]": ... @t.overload def flat_map( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], T2] ) -> "Chain[t.List[T2]]": ... @t.overload def flat_map( self: "Chain[t.Iterable[t.Iterable[T]]]", iteratee: None = None ) -> "Chain[t.List[T]]": ... @t.overload def flat_map(self: "Chain[t.Iterable[T]]", iteratee: None = None) -> "Chain[t.List[T]]": ... def flat_map(self, iteratee=None): return self._wrap(pyd.flat_map)(iteratee) @t.overload def flat_map_deep( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], None] = None, ) -> "Chain[t.List[t.Any]]": ... @t.overload def flat_map_deep( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Union[t.Callable[[T2, T], t.Any], None] = None ) -> "Chain[t.List[t.Any]]": ... @t.overload def flat_map_deep( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Union[t.Callable[[T2], t.Any], None] = None ) -> "Chain[t.List[t.Any]]": ... @t.overload def flat_map_deep( self: "Chain[t.Iterable[T]]", iteratee: t.Union[t.Callable[[T, int, t.List[T]], t.Any], None] = None, ) -> "Chain[t.List[t.Any]]": ... @t.overload def flat_map_deep( self: "Chain[t.Iterable[T]]", iteratee: t.Union[t.Callable[[T, int], t.Any], None] = None ) -> "Chain[t.List[t.Any]]": ... @t.overload def flat_map_deep( self: "Chain[t.Iterable[T]]", iteratee: t.Union[t.Callable[[T], t.Any], None] = None ) -> "Chain[t.List[t.Any]]": ... def flat_map_deep(self, iteratee=None): return self._wrap(pyd.flat_map_deep)(iteratee) @t.overload def flat_map_depth( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], None] = None, depth: int = 1, ) -> "Chain[t.List[t.Any]]": ... @t.overload def flat_map_depth( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Union[t.Callable[[T2, T], t.Any], None] = None, depth: int = 1, ) -> "Chain[t.List[t.Any]]": ... @t.overload def flat_map_depth( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Union[t.Callable[[T2], t.Any], None] = None, depth: int = 1, ) -> "Chain[t.List[t.Any]]": ... @t.overload def flat_map_depth( self: "Chain[t.Iterable[T]]", iteratee: t.Union[t.Callable[[T, int, t.List[T]], t.Any], None] = None, depth: int = 1, ) -> "Chain[t.List[t.Any]]": ... 
@t.overload def flat_map_depth( self: "Chain[t.Iterable[T]]", iteratee: t.Union[t.Callable[[T, int], t.Any], None] = None, depth: int = 1, ) -> "Chain[t.List[t.Any]]": ... @t.overload def flat_map_depth( self: "Chain[t.Iterable[T]]", iteratee: t.Union[t.Callable[[T], t.Any], None] = None, depth: int = 1, ) -> "Chain[t.List[t.Any]]": ... def flat_map_depth(self, iteratee=None, depth=1): return self._wrap(pyd.flat_map_depth)(iteratee, depth) @t.overload def for_each( self: "Chain[t.Dict[T, T2]]", iteratee: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_each( self: "Chain[t.Dict[T, T2]]", iteratee: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_each( self: "Chain[t.Dict[T, T2]]", iteratee: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_each( self: "Chain[t.List[T]]", iteratee: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T]]": ... @t.overload def for_each( self: "Chain[t.List[T]]", iteratee: t.Union[t.Callable[[T, int], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T]]": ... @t.overload def for_each( self: "Chain[t.List[T]]", iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T]]": ... def for_each(self, iteratee=None): return self._wrap(pyd.for_each)(iteratee) @t.overload def for_each_right( self: "Chain[t.Dict[T, T2]]", iteratee: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT], ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_each_right( self: "Chain[t.Dict[T, T2]]", iteratee: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_each_right( self: "Chain[t.Dict[T, T2]]", iteratee: t.Union[t.Callable[[T2], t.Any], IterateeObjT] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_each_right( self: "Chain[t.List[T]]", iteratee: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT], ) -> "Chain[t.List[T]]": ... @t.overload def for_each_right( self: "Chain[t.List[T]]", iteratee: t.Union[t.Callable[[T, int], t.Any], IterateeObjT] ) -> "Chain[t.List[T]]": ... @t.overload def for_each_right( self: "Chain[t.List[T]]", iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT] ) -> "Chain[t.List[T]]": ... def for_each_right(self, iteratee): return self._wrap(pyd.for_each_right)(iteratee) @t.overload def group_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], T2] ) -> "Chain[t.Dict[T2, t.List[T]]]": ... @t.overload def group_by( self: "Chain[t.Iterable[T]]", iteratee: t.Union[IterateeObjT, None] = None ) -> "Chain[t.Dict[t.Any, t.List[T]]]": ... def group_by(self, iteratee=None): return self._wrap(pyd.group_by)(iteratee) def includes( self: "Chain[t.Union[t.Sequence[t.Any], t.Dict[t.Any, t.Any]]]", target: t.Any, from_index: int = 0, ) -> "Chain[bool]": return self._wrap(pyd.includes)(target, from_index) def invoke_map( self: "Chain[t.Iterable[t.Any]]", path: PathT, *args: t.Any, **kwargs: t.Any ) -> "Chain[t.List[t.Any]]": return self._wrap(pyd.invoke_map)(path, *args, **kwargs) @t.overload def key_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], T2] ) -> "Chain[t.Dict[T2, T]]": ... @t.overload def key_by( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Union[IterateeObjT, None] = None ) -> "Chain[t.Dict[t.Any, t.Any]]": ... 
def key_by(self, iteratee=None): return self._wrap(pyd.key_by)(iteratee) @t.overload def map_( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], T3] ) -> "Chain[t.List[T3]]": ... @t.overload def map_( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], T3] ) -> "Chain[t.List[T3]]": ... @t.overload def map_( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], T3] ) -> "Chain[t.List[T3]]": ... @t.overload def map_( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], T2] ) -> "Chain[t.List[T2]]": ... @t.overload def map_( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], T2] ) -> "Chain[t.List[T2]]": ... @t.overload def map_( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> "Chain[t.List[T2]]": ... @t.overload def map_( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Union[IterateeObjT, None] = None ) -> "Chain[t.List[t.Any]]": ... def map_(self, iteratee=None): return self._wrap(pyd.map_)(iteratee) map = map_ def nest(self: "Chain[t.Iterable[t.Any]]", *properties: t.Any) -> "Chain[t.Any]": return self._wrap(pyd.nest)(*properties) @t.overload def order_by( self: "Chain[t.Mapping[t.Any, T2]]", keys: t.Iterable[t.Union[str, int]], orders: t.Union[t.Iterable[bool], bool], reverse: bool = False, ) -> "Chain[t.List[T2]]": ... @t.overload def order_by( self: "Chain[t.Mapping[t.Any, T2]]", keys: t.Iterable[str], orders: None = None, reverse: bool = False, ) -> "Chain[t.List[T2]]": ... @t.overload def order_by( self: "Chain[t.Iterable[T]]", keys: t.Iterable[t.Union[str, int]], orders: t.Union[t.Iterable[bool], bool], reverse: bool = False, ) -> "Chain[t.List[T]]": ... @t.overload def order_by( self: "Chain[t.Iterable[T]]", keys: t.Iterable[str], orders: None = None, reverse: bool = False, ) -> "Chain[t.List[T]]": ... def order_by(self, keys, orders=None, reverse=False): return self._wrap(pyd.order_by)(keys, orders, reverse) @t.overload def partition( self: "Chain[t.Mapping[T, T2]]", predicate: t.Callable[[T2, T, t.Dict[T, T2]], t.Any] ) -> "Chain[t.List[t.List[T2]]]": ... @t.overload def partition( self: "Chain[t.Mapping[T, T2]]", predicate: t.Callable[[T2, T], t.Any] ) -> "Chain[t.List[t.List[T2]]]": ... @t.overload def partition( self: "Chain[t.Mapping[t.Any, T2]]", predicate: t.Callable[[T2], t.Any] ) -> "Chain[t.List[t.List[T2]]]": ... @t.overload def partition( self: "Chain[t.Mapping[t.Any, T2]]", predicate: t.Union[IterateeObjT, None] = None ) -> "Chain[t.List[t.List[T2]]]": ... @t.overload def partition( self: "Chain[t.Iterable[T]]", predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[t.List[t.List[T]]]": ... @t.overload def partition( self: "Chain[t.Iterable[T]]", predicate: t.Callable[[T, int], t.Any] ) -> "Chain[t.List[t.List[T]]]": ... @t.overload def partition( self: "Chain[t.Iterable[T]]", predicate: t.Callable[[T], t.Any] ) -> "Chain[t.List[t.List[T]]]": ... @t.overload def partition( self: "Chain[t.Iterable[T]]", predicate: t.Union[IterateeObjT, None] = None ) -> "Chain[t.List[t.List[T]]]": ... def partition(self, predicate=None): return self._wrap(pyd.partition)(predicate) def pluck(self: "Chain[t.Iterable[t.Any]]", path: PathT) -> "Chain[t.List[t.Any]]": return self._wrap(pyd.pluck)(path) @t.overload def reduce_( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T3, T2, T], T3], accumulator: T3 ) -> "Chain[T3]": ... @t.overload def reduce_( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T3, T2], T3], accumulator: T3 ) -> "Chain[T3]": ... 
@t.overload def reduce_( self: "Chain[t.Mapping[t.Any, t.Any]]", iteratee: t.Callable[[T3], T3], accumulator: T3 ) -> "Chain[T3]": ... @t.overload def reduce_( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T2, T], T2], accumulator: None = None, ) -> "Chain[T2]": ... @t.overload def reduce_( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2, T2], T2], accumulator: None = None, ) -> "Chain[T2]": ... @t.overload def reduce_( self: "Chain[t.Mapping[t.Any, t.Any]]", iteratee: t.Callable[[T], T], accumulator: None = None, ) -> "Chain[T]": ... @t.overload def reduce_( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T2, T, int], T2], accumulator: T2 ) -> "Chain[T2]": ... @t.overload def reduce_( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T2, T], T2], accumulator: T2 ) -> "Chain[T2]": ... @t.overload def reduce_( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Callable[[T2], T2], accumulator: T2 ) -> "Chain[T2]": ... @t.overload def reduce_( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, T, int], T], accumulator: None = None ) -> "Chain[T]": ... @t.overload def reduce_( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, T], T], accumulator: None = None ) -> "Chain[T]": ... @t.overload def reduce_( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Callable[[T], T], accumulator: None = None ) -> "Chain[T]": ... @t.overload def reduce_( self: "Chain[t.Iterable[T]]", iteratee: None = None, accumulator: t.Union[T, None] = None ) -> "Chain[T]": ... def reduce_(self, iteratee=None, accumulator=None): return self._wrap(pyd.reduce_)(iteratee, accumulator) reduce = reduce_ @t.overload def reduce_right( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T3, T2, T], T3], accumulator: T3 ) -> "Chain[T3]": ... @t.overload def reduce_right( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T3, T2], T3], accumulator: T3 ) -> "Chain[T3]": ... @t.overload def reduce_right( self: "Chain[t.Mapping[t.Any, t.Any]]", iteratee: t.Callable[[T3], T3], accumulator: T3 ) -> "Chain[T3]": ... @t.overload def reduce_right( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T2, T], T2], accumulator: None = None, ) -> "Chain[T2]": ... @t.overload def reduce_right( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2, T2], T2], accumulator: None = None, ) -> "Chain[T2]": ... @t.overload def reduce_right( self: "Chain[t.Mapping[t.Any, t.Any]]", iteratee: t.Callable[[T], T], accumulator: None = None, ) -> "Chain[T]": ... @t.overload def reduce_right( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T2, T, int], T2], accumulator: T2 ) -> "Chain[T2]": ... @t.overload def reduce_right( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T2, T], T2], accumulator: T2 ) -> "Chain[T2]": ... @t.overload def reduce_right( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Callable[[T2], T2], accumulator: T2 ) -> "Chain[T2]": ... @t.overload def reduce_right( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, T, int], T], accumulator: None = None ) -> "Chain[T]": ... @t.overload def reduce_right( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, T], T], accumulator: None = None ) -> "Chain[T]": ... @t.overload def reduce_right( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Callable[[T], T], accumulator: None = None ) -> "Chain[T]": ... @t.overload def reduce_right( self: "Chain[t.Iterable[T]]", iteratee: None = None, accumulator: t.Union[T, None] = None ) -> "Chain[T]": ... 
def reduce_right(self, iteratee=None, accumulator=None): return self._wrap(pyd.reduce_right)(iteratee, accumulator) @t.overload def reductions( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T3, T2, T], T3], accumulator: T3, from_right: bool = False, ) -> "Chain[t.List[T3]]": ... @t.overload def reductions( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T3, T2], T3], accumulator: T3, from_right: bool = False, ) -> "Chain[t.List[T3]]": ... @t.overload def reductions( self: "Chain[t.Mapping[t.Any, t.Any]]", iteratee: t.Callable[[T3], T3], accumulator: T3, from_right: bool = False, ) -> "Chain[t.List[T3]]": ... @t.overload def reductions( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T2, T], T2], accumulator: None = None, from_right: bool = False, ) -> "Chain[t.List[T2]]": ... @t.overload def reductions( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2, T2], T2], accumulator: None = None, from_right: bool = False, ) -> "Chain[t.List[T2]]": ... @t.overload def reductions( self: "Chain[t.Mapping[t.Any, t.Any]]", iteratee: t.Callable[[T], T], accumulator: None = None, from_right: bool = False, ) -> "Chain[t.List[T]]": ... @t.overload def reductions( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T2, T, int], T2], accumulator: T2, from_right: bool = False, ) -> "Chain[t.List[T2]]": ... @t.overload def reductions( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T2, T], T2], accumulator: T2, from_right: bool = False, ) -> "Chain[t.List[T2]]": ... @t.overload def reductions( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Callable[[T2], T2], accumulator: T2, from_right: bool = False, ) -> "Chain[t.List[T2]]": ... @t.overload def reductions( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, T, int], T], accumulator: None = None, from_right: bool = False, ) -> "Chain[t.List[T]]": ... @t.overload def reductions( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, T], T], accumulator: None = None, from_right: bool = False, ) -> "Chain[t.List[T]]": ... @t.overload def reductions( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Callable[[T], T], accumulator: None = None, from_right: bool = False, ) -> "Chain[t.List[T]]": ... @t.overload def reductions( self: "Chain[t.Iterable[T]]", iteratee: None = None, accumulator: t.Union[T, None] = None, from_right: bool = False, ) -> "Chain[t.List[T]]": ... def reductions(self, iteratee=None, accumulator=None, from_right=False): return self._wrap(pyd.reductions)(iteratee, accumulator, from_right) @t.overload def reductions_right( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T3, T2, T], T3], accumulator: T3 ) -> "Chain[t.List[T3]]": ... @t.overload def reductions_right( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T3, T2], T3], accumulator: T3 ) -> "Chain[t.List[T3]]": ... @t.overload def reductions_right( self: "Chain[t.Mapping[t.Any, t.Any]]", iteratee: t.Callable[[T3], T3], accumulator: T3 ) -> "Chain[t.List[T3]]": ... @t.overload def reductions_right( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T2, T], T2], accumulator: None = None, ) -> "Chain[t.List[T2]]": ... @t.overload def reductions_right( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2, T2], T2], accumulator: None = None, ) -> "Chain[t.List[T2]]": ... @t.overload def reductions_right( self: "Chain[t.Mapping[t.Any, t.Any]]", iteratee: t.Callable[[T], T], accumulator: None = None, ) -> "Chain[t.List[T]]": ... 
@t.overload def reductions_right( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T2, T, int], T2], accumulator: T2 ) -> "Chain[t.List[T2]]": ... @t.overload def reductions_right( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T2, T], T2], accumulator: T2 ) -> "Chain[t.List[T2]]": ... @t.overload def reductions_right( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Callable[[T2], T2], accumulator: T2 ) -> "Chain[t.List[T2]]": ... @t.overload def reductions_right( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, T, int], T], accumulator: None = None ) -> "Chain[t.List[T]]": ... @t.overload def reductions_right( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, T], T], accumulator: None = None ) -> "Chain[t.List[T]]": ... @t.overload def reductions_right( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Callable[[T], T], accumulator: None = None ) -> "Chain[t.List[T]]": ... @t.overload def reductions_right( self: "Chain[t.Iterable[T]]", iteratee: None = None, accumulator: t.Union[T, None] = None ) -> "Chain[t.List[T]]": ... def reductions_right(self, iteratee=None, accumulator=None): return self._wrap(pyd.reductions_right)(iteratee, accumulator) @t.overload def reject( self: "Chain[t.Mapping[T, T2]]", predicate: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T2]]": ... @t.overload def reject( self: "Chain[t.Mapping[T, T2]]", predicate: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T2]]": ... @t.overload def reject( self: "Chain[t.Mapping[t.Any, T2]]", predicate: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T2]]": ... @t.overload def reject( self: "Chain[t.Iterable[T]]", predicate: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T]]": ... @t.overload def reject( self: "Chain[t.Iterable[T]]", predicate: t.Union[t.Callable[[T, int], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T]]": ... @t.overload def reject( self: "Chain[t.Iterable[T]]", predicate: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> "Chain[t.List[T]]": ... def reject(self, predicate=None): return self._wrap(pyd.reject)(predicate) def sample(self: "Chain[t.Sequence[T]]") -> "Chain[T]": return self._wrap(pyd.sample)() def sample_size( self: "Chain[t.Sequence[T]]", n: t.Union[int, None] = None ) -> "Chain[t.List[T]]": return self._wrap(pyd.sample_size)(n) @t.overload def shuffle(self: "Chain[t.Mapping[t.Any, T]]") -> "Chain[t.List[T]]": ... @t.overload def shuffle(self: "Chain[t.Iterable[T]]") -> "Chain[t.List[T]]": ... def shuffle(self): return self._wrap(pyd.shuffle)() def size(self: "Chain[t.Sized]") -> "Chain[int]": return self._wrap(pyd.size)() def some( self: "Chain[t.Iterable[T]]", predicate: t.Union[t.Callable[[T], t.Any], None] = None ) -> "Chain[bool]": return self._wrap(pyd.some)(predicate) @t.overload def sort_by( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, reverse: bool = False, ) -> "Chain[t.List[T2]]": ... @t.overload def sort_by( self: "Chain[t.Iterable[T]]", iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, reverse: bool = False, ) -> "Chain[t.List[T]]": ... 
def sort_by(self, iteratee=None, reverse=False): return self._wrap(pyd.sort_by)(iteratee, reverse) def after(self: "Chain[t.Callable[P, T]]", n: t.SupportsInt) -> "Chain[After[P, T]]": return self._wrap(pyd.after)(n) def ary(self: "Chain[t.Callable[..., T]]", n: t.Union[t.SupportsInt, None]) -> "Chain[Ary[T]]": return self._wrap(pyd.ary)(n) def before(self: "Chain[t.Callable[P, T]]", n: t.SupportsInt) -> "Chain[Before[P, T]]": return self._wrap(pyd.before)(n) def conjoin( self: "Chain[t.Callable[[T], t.Any]]", *funcs: t.Callable[[T], t.Any] ) -> "Chain[t.Callable[[t.Iterable[T]], bool]]": return self._wrap(pyd.conjoin)(*funcs) @t.overload def curry( self: "Chain[t.Callable[[T1], T]]", arity: t.Union[int, None] = None ) -> "Chain[CurryOne[T1, T]]": ... @t.overload def curry( self: "Chain[t.Callable[[T1, T2], T]]", arity: t.Union[int, None] = None ) -> "Chain[CurryTwo[T1, T2, T]]": ... @t.overload def curry( self: "Chain[t.Callable[[T1, T2, T3], T]]", arity: t.Union[int, None] = None ) -> "Chain[CurryThree[T1, T2, T3, T]]": ... @t.overload def curry( self: "Chain[t.Callable[[T1, T2, T3, T4], T]]", arity: t.Union[int, None] = None ) -> "Chain[CurryFour[T1, T2, T3, T4, T]]": ... @t.overload def curry( self: "Chain[t.Callable[[T1, T2, T3, T4, T5], T]]", arity: t.Union[int, None] = None ) -> "Chain[CurryFive[T1, T2, T3, T4, T5, T]]": ... def curry(self, arity=None): return self._wrap(pyd.curry)(arity) @t.overload def curry_right( self: "Chain[t.Callable[[T1], T]]", arity: t.Union[int, None] = None ) -> "Chain[CurryRightOne[T1, T]]": ... @t.overload def curry_right( self: "Chain[t.Callable[[T1, T2], T]]", arity: t.Union[int, None] = None ) -> "Chain[CurryRightTwo[T2, T1, T]]": ... @t.overload def curry_right( self: "Chain[t.Callable[[T1, T2, T3], T]]", arity: t.Union[int, None] = None ) -> "Chain[CurryRightThree[T3, T2, T1, T]]": ... @t.overload def curry_right( self: "Chain[t.Callable[[T1, T2, T3, T4], T]]", arity: t.Union[int, None] = None ) -> "Chain[CurryRightFour[T4, T3, T2, T1, T]]": ... @t.overload def curry_right( self: "Chain[t.Callable[[T1, T2, T3, T4, T5], T]]", ) -> "Chain[CurryRightFive[T5, T4, T3, T2, T1, T]]": ... def curry_right(self, arity=None): return self._wrap(pyd.curry_right)(arity) def debounce( self: "Chain[t.Callable[P, T]]", wait: int, max_wait: t.Union[int, Literal[False]] = False ) -> "Chain[Debounce[P, T]]": return self._wrap(pyd.debounce)(wait, max_wait) def delay( self: "Chain[t.Callable[P, T]]", wait: int, *args: "P.args", **kwargs: "P.kwargs" ) -> "Chain[T]": return self._wrap(pyd.delay)(wait, *args, **kwargs) def disjoin( self: "Chain[t.Callable[[T], t.Any]]", *funcs: t.Callable[[T], t.Any] ) -> "Chain[Disjoin[T]]": return self._wrap(pyd.disjoin)(*funcs) @t.overload def flip( self: "Chain[t.Callable[[T1, T2, T3, T4, T5], T]]", ) -> "Chain[t.Callable[[T5, T4, T3, T2, T1], T]]": ... @t.overload def flip( self: "Chain[t.Callable[[T1, T2, T3, T4], T]]", ) -> "Chain[t.Callable[[T4, T3, T2, T1], T]]": ... @t.overload def flip( self: "Chain[t.Callable[[T1, T2, T3], T]]", ) -> "Chain[t.Callable[[T3, T2, T1], T]]": ... @t.overload def flip(self: "Chain[t.Callable[[T1, T2], T]]") -> "Chain[t.Callable[[T2, T1], T]]": ... @t.overload def flip(self: "Chain[t.Callable[[T1], T]]") -> "Chain[t.Callable[[T1], T]]": ... 
def flip(self: "Chain[t.Callable[..., t.Any]]") -> "Chain[t.Callable[..., t.Any]]": return self._wrap(pyd.flip)() @t.overload def flow( self: "Chain[t.Callable[P, T2]]", func2: t.Callable[[T2], T3], func3: t.Callable[[T3], T4], func4: t.Callable[[T4], T5], func5: t.Callable[[T5], T], ) -> "Chain[Flow[P, T]]": ... @t.overload def flow( self: "Chain[t.Callable[P, T2]]", func2: t.Callable[[T2], T3], func3: t.Callable[[T3], T4], func4: t.Callable[[T4], T], ) -> "Chain[Flow[P, T]]": ... @t.overload def flow( self: "Chain[t.Callable[P, T2]]", func2: t.Callable[[T2], T3], func3: t.Callable[[T3], T] ) -> "Chain[Flow[P, T]]": ... @t.overload def flow( self: "Chain[t.Callable[P, T2]]", func2: t.Callable[[T2], T] ) -> "Chain[Flow[P, T]]": ... @t.overload def flow(self: "Chain[t.Callable[P, T]]") -> "Chain[Flow[P, T]]": ... def flow(self, *funcs): return self._wrap(pyd.flow)(*funcs) @t.overload def flow_right( self: "Chain[t.Callable[[T4], T]]", func4: t.Callable[[T3], T4], func3: t.Callable[[T2], T3], func2: t.Callable[[T1], T2], func1: t.Callable[P, T1], ) -> "Chain[Flow[P, T]]": ... @t.overload def flow_right( self: "Chain[t.Callable[[T3], T]]", func3: t.Callable[[T2], T3], func2: t.Callable[[T1], T2], func1: t.Callable[P, T1], ) -> "Chain[Flow[P, T]]": ... @t.overload def flow_right( self: "Chain[t.Callable[[T2], T]]", func2: t.Callable[[T1], T2], func1: t.Callable[P, T1] ) -> "Chain[Flow[P, T]]": ... @t.overload def flow_right( self: "Chain[t.Callable[[T1], T]]", func1: t.Callable[P, T1] ) -> "Chain[Flow[P, T]]": ... @t.overload def flow_right(self: "Chain[t.Callable[P, T]]") -> "Chain[Flow[P, T]]": ... def flow_right(self, *funcs): return self._wrap(pyd.flow_right)(*funcs) def iterated(self: "Chain[t.Callable[[T], T]]") -> "Chain[Iterated[T]]": return self._wrap(pyd.iterated)() def juxtapose( self: "Chain[t.Callable[P, T]]", *funcs: t.Callable[P, T] ) -> "Chain[Juxtapose[P, T]]": return self._wrap(pyd.juxtapose)(*funcs) def negate(self: "Chain[t.Callable[P, t.Any]]") -> "Chain[Negate[P]]": return self._wrap(pyd.negate)() def once(self: "Chain[t.Callable[P, T]]") -> "Chain[Once[P, T]]": return self._wrap(pyd.once)() @t.overload def over_args( self: "Chain[t.Callable[[T1, T2, T3, T4, T5], T]]", transform_one: t.Callable[[T1], T1], transform_two: t.Callable[[T2], T2], transform_three: t.Callable[[T3], T3], transform_four: t.Callable[[T4], T4], transform_five: t.Callable[[T5], T5], ) -> "Chain[t.Callable[[T1, T2, T3, T4, T5], T]]": ... @t.overload def over_args( self: "Chain[t.Callable[[T1, T2, T3, T4], T]]", transform_one: t.Callable[[T1], T1], transform_two: t.Callable[[T2], T2], transform_three: t.Callable[[T3], T3], transform_four: t.Callable[[T4], T4], ) -> "Chain[t.Callable[[T1, T2, T3, T4], T]]": ... @t.overload def over_args( self: "Chain[t.Callable[[T1, T2, T3], T]]", transform_one: t.Callable[[T1], T1], transform_two: t.Callable[[T2], T2], transform_three: t.Callable[[T3], T3], ) -> "Chain[t.Callable[[T1, T2, T3], T]]": ... @t.overload def over_args( self: "Chain[t.Callable[[T1, T2], T]]", transform_one: t.Callable[[T1], T1], transform_two: t.Callable[[T2], T2], ) -> "Chain[t.Callable[[T1, T2], T]]": ... @t.overload def over_args( self: "Chain[t.Callable[[T1], T]]", transform_one: t.Callable[[T1], T1] ) -> "Chain[t.Callable[[T1], T]]": ... 
def over_args(self, *transforms): return self._wrap(pyd.over_args)(*transforms) def partial( self: "Chain[t.Callable[..., T]]", *args: t.Any, **kwargs: t.Any ) -> "Chain[Partial[T]]": return self._wrap(pyd.partial)(*args, **kwargs) def partial_right( self: "Chain[t.Callable[..., T]]", *args: t.Any, **kwargs: t.Any ) -> "Chain[Partial[T]]": return self._wrap(pyd.partial_right)(*args, **kwargs) def rearg(self: "Chain[t.Callable[P, T]]", *indexes: int) -> "Chain[Rearg[P, T]]": return self._wrap(pyd.rearg)(*indexes) def spread(self: "Chain[t.Callable[..., T]]") -> "Chain[Spread[T]]": return self._wrap(pyd.spread)() def throttle(self: "Chain[t.Callable[P, T]]", wait: int) -> "Chain[Throttle[P, T]]": return self._wrap(pyd.throttle)(wait) def unary(self: "Chain[t.Callable[..., T]]") -> "Chain[Ary[T]]": return self._wrap(pyd.unary)() def wrap(self: "Chain[T1]", func: t.Callable[Concatenate[T1, P], T]) -> "Chain[Partial[T]]": return self._wrap(pyd.wrap)(func) @t.overload def add(self: "Chain['SupportsAdd[T, T2]']", b: T) -> "Chain[T2]": ... @t.overload def add(self: "Chain[T]", b: "SupportsAdd[T, T2]") -> "Chain[T2]": ... def add(self, b): return self._wrap(pyd.add)(b) @t.overload def sum_(self: "Chain[t.Mapping[t.Any, 'SupportsAdd[int, T]']]") -> "Chain[T]": ... @t.overload def sum_(self: "Chain[t.Iterable['SupportsAdd[int, T]']]") -> "Chain[T]": ... def sum_(self): return self._wrap(pyd.sum_)() sum = sum_ @t.overload def sum_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], "SupportsAdd[int, T3]"], ) -> "Chain[T3]": ... @t.overload def sum_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], "SupportsAdd[int, T3]"] ) -> "Chain[T3]": ... @t.overload def sum_by( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], "SupportsAdd[int, T3]"] ) -> "Chain[T3]": ... @t.overload def sum_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], "SupportsAdd[int, T2]"], ) -> "Chain[T2]": ... @t.overload def sum_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], "SupportsAdd[int, T2]"] ) -> "Chain[T2]": ... @t.overload def sum_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], "SupportsAdd[int, T2]"] ) -> "Chain[T2]": ... @t.overload def sum_by( self: "Chain[t.Mapping[t.Any, 'SupportsAdd[int, T]']]", iteratee: None = None ) -> "Chain[T]": ... @t.overload def sum_by( self: "Chain[t.Iterable['SupportsAdd[int, T]']]", iteratee: None = None ) -> "Chain[T]": ... def sum_by(self, iteratee=None): return self._wrap(pyd.sum_by)(iteratee) @t.overload def mean(self: "Chain[t.Mapping[t.Any, 'SupportsAdd[int, t.Any]']]") -> "Chain[float]": ... @t.overload def mean(self: "Chain[t.Iterable['SupportsAdd[int, t.Any]']]") -> "Chain[float]": ... def mean(self): return self._wrap(pyd.mean)() @t.overload def mean_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], "SupportsAdd[int, t.Any]"], ) -> "Chain[float]": ... @t.overload def mean_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], "SupportsAdd[int, t.Any]"] ) -> "Chain[float]": ... @t.overload def mean_by( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], "SupportsAdd[int, t.Any]"] ) -> "Chain[float]": ... @t.overload def mean_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], "SupportsAdd[int, t.Any]"], ) -> "Chain[float]": ... @t.overload def mean_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], "SupportsAdd[int, t.Any]"] ) -> "Chain[float]": ... 
@t.overload def mean_by( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], "SupportsAdd[int, t.Any]"] ) -> "Chain[float]": ... @t.overload def mean_by( self: "Chain[t.Mapping[t.Any, 'SupportsAdd[int, t.Any]']]", iteratee: None = None ) -> "Chain[float]": ... @t.overload def mean_by( self: "Chain[t.Iterable['SupportsAdd[int, t.Any]']]", iteratee: None = None ) -> "Chain[float]": ... def mean_by(self, iteratee=None): return self._wrap(pyd.mean_by)(iteratee) def ceil(self: "Chain[NumberT]", precision: int = 0) -> "Chain[float]": return self._wrap(pyd.ceil)(precision) def clamp( self: "Chain[NumT]", lower: NumT2, upper: t.Union[NumT3, None] = None ) -> "Chain[t.Union[NumT, NumT2, NumT3]]": return self._wrap(pyd.clamp)(lower, upper) def divide( self: "Chain[t.Union[NumberT, None]]", divisor: t.Union[NumberT, None] ) -> "Chain[float]": return self._wrap(pyd.divide)(divisor) def floor(self: "Chain[NumberT]", precision: int = 0) -> "Chain[float]": return self._wrap(pyd.floor)(precision) @t.overload def max_( self: "Chain[t.Mapping[t.Any, 'SupportsRichComparisonT']]", default: Unset = UNSET ) -> "Chain['SupportsRichComparisonT']": ... @t.overload def max_( self: "Chain[t.Mapping[t.Any, 'SupportsRichComparisonT']]", default: T ) -> "Chain[t.Union['SupportsRichComparisonT', T]]": ... @t.overload def max_( self: "Chain[t.Iterable['SupportsRichComparisonT']]", default: Unset = UNSET ) -> "Chain['SupportsRichComparisonT']": ... @t.overload def max_( self: "Chain[t.Iterable['SupportsRichComparisonT']]", default: T ) -> "Chain[t.Union['SupportsRichComparisonT', T]]": ... def max_(self, default=UNSET): return self._wrap(pyd.max_)(default) max = max_ @t.overload def max_by( self: "Chain[t.Mapping[t.Any, 'SupportsRichComparisonT']]", iteratee: None = None, default: Unset = UNSET, ) -> "Chain['SupportsRichComparisonT']": ... @t.overload def max_by( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], "SupportsRichComparisonT"], default: Unset = UNSET, ) -> "Chain[T2]": ... @t.overload def max_by( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], "SupportsRichComparisonT"], *, default: T, ) -> "Chain[t.Union[T2, T]]": ... @t.overload def max_by( self: "Chain[t.Mapping[t.Any, 'SupportsRichComparisonT']]", iteratee: None = None, *, default: T, ) -> "Chain[t.Union['SupportsRichComparisonT', T]]": ... @t.overload def max_by( self: "Chain[t.Iterable['SupportsRichComparisonT']]", iteratee: None = None, default: Unset = UNSET, ) -> "Chain['SupportsRichComparisonT']": ... @t.overload def max_by( self: "Chain[t.Iterable[T2]]", iteratee: t.Callable[[T2], "SupportsRichComparisonT"], default: Unset = UNSET, ) -> "Chain[T2]": ... @t.overload def max_by( self: "Chain[t.Iterable[T2]]", iteratee: t.Callable[[T2], "SupportsRichComparisonT"], *, default: T, ) -> "Chain[t.Union[T2, T]]": ... @t.overload def max_by( self: "Chain[t.Iterable['SupportsRichComparisonT']]", iteratee: None = None, *, default: T ) -> "Chain[t.Union['SupportsRichComparisonT', T]]": ... @t.overload def max_by( self: "Chain[t.Iterable[T]]", iteratee: IterateeObjT, default: Unset = UNSET ) -> "Chain[T]": ... @t.overload def max_by( self: "Chain[t.Iterable[T]]", iteratee: IterateeObjT, default: T2 ) -> "Chain[t.Union[T, T2]]": ... def max_by(self, iteratee=None, default=UNSET): return self._wrap(pyd.max_by)(iteratee, default) @t.overload def median( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], NumberT] ) -> "Chain[t.Union[float, int]]": ... 
@t.overload def median( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], NumberT] ) -> "Chain[t.Union[float, int]]": ... @t.overload def median( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], NumberT] ) -> "Chain[t.Union[float, int]]": ... @t.overload def median( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], NumberT] ) -> "Chain[t.Union[float, int]]": ... @t.overload def median( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], NumberT] ) -> "Chain[t.Union[float, int]]": ... @t.overload def median( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], NumberT] ) -> "Chain[t.Union[float, int]]": ... @t.overload def median( self: "Chain[t.Iterable[NumberT]]", iteratee: None = None ) -> "Chain[t.Union[float, int]]": ... def median(self, iteratee=None): return self._wrap(pyd.median)(iteratee) @t.overload def min_( self: "Chain[t.Mapping[t.Any, 'SupportsRichComparisonT']]", default: Unset = UNSET ) -> "Chain['SupportsRichComparisonT']": ... @t.overload def min_( self: "Chain[t.Mapping[t.Any, 'SupportsRichComparisonT']]", default: T ) -> "Chain[t.Union['SupportsRichComparisonT', T]]": ... @t.overload def min_( self: "Chain[t.Iterable['SupportsRichComparisonT']]", default: Unset = UNSET ) -> "Chain['SupportsRichComparisonT']": ... @t.overload def min_( self: "Chain[t.Iterable['SupportsRichComparisonT']]", default: T ) -> "Chain[t.Union['SupportsRichComparisonT', T]]": ... def min_(self, default=UNSET): return self._wrap(pyd.min_)(default) min = min_ @t.overload def min_by( self: "Chain[t.Mapping[t.Any, 'SupportsRichComparisonT']]", iteratee: None = None, default: Unset = UNSET, ) -> "Chain['SupportsRichComparisonT']": ... @t.overload def min_by( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], "SupportsRichComparisonT"], default: Unset = UNSET, ) -> "Chain[T2]": ... @t.overload def min_by( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], "SupportsRichComparisonT"], *, default: T, ) -> "Chain[t.Union[T2, T]]": ... @t.overload def min_by( self: "Chain[t.Mapping[t.Any, 'SupportsRichComparisonT']]", iteratee: None = None, *, default: T, ) -> "Chain[t.Union['SupportsRichComparisonT', T]]": ... @t.overload def min_by( self: "Chain[t.Iterable['SupportsRichComparisonT']]", iteratee: None = None, default: Unset = UNSET, ) -> "Chain['SupportsRichComparisonT']": ... @t.overload def min_by( self: "Chain[t.Iterable[T2]]", iteratee: t.Callable[[T2], "SupportsRichComparisonT"], default: Unset = UNSET, ) -> "Chain[T2]": ... @t.overload def min_by( self: "Chain[t.Iterable[T2]]", iteratee: t.Callable[[T2], "SupportsRichComparisonT"], *, default: T, ) -> "Chain[t.Union[T2, T]]": ... @t.overload def min_by( self: "Chain[t.Iterable['SupportsRichComparisonT']]", iteratee: None = None, *, default: T ) -> "Chain[t.Union['SupportsRichComparisonT', T]]": ... @t.overload def min_by( self: "Chain[t.Iterable[T]]", iteratee: IterateeObjT, default: Unset = UNSET ) -> "Chain[T]": ... @t.overload def min_by( self: "Chain[t.Iterable[T]]", iteratee: IterateeObjT, default: T2 ) -> "Chain[t.Union[T, T2]]": ... def min_by(self, iteratee=None, default=UNSET): return self._wrap(pyd.min_by)(iteratee, default) def moving_mean( self: "Chain[t.Sequence['SupportsAdd[int, t.Any]']]", size: t.SupportsInt ) -> "Chain[t.List[float]]": return self._wrap(pyd.moving_mean)(size) @t.overload def multiply(self: "Chain[SupportsMul[int, T2]]", multiplicand: None) -> "Chain[T2]": ... 
@t.overload def multiply(self: "Chain[None]", multiplicand: SupportsMul[int, T2]) -> "Chain[T2]": ... @t.overload def multiply(self: "Chain[None]", multiplicand: None) -> "Chain[int]": ... @t.overload def multiply(self: "Chain[SupportsMul[T, T2]]", multiplicand: T) -> "Chain[T2]": ... @t.overload def multiply(self: "Chain[T]", multiplicand: SupportsMul[T, T2]) -> "Chain[T2]": ... def multiply(self, multiplicand): return self._wrap(pyd.multiply)(multiplicand) @t.overload def power(self: "Chain[int]", n: int) -> "Chain[t.Union[int, float]]": ... @t.overload def power(self: "Chain[float]", n: t.Union[int, float]) -> "Chain[float]": ... @t.overload def power(self: "Chain[t.List[int]]", n: int) -> "Chain[t.List[t.Union[int, float]]]": ... @t.overload def power( self: "Chain[t.List[float]]", n: t.List[t.Union[int, float]] ) -> "Chain[t.List[float]]": ... def power(self, n): return self._wrap(pyd.power)(n) @t.overload def round_( self: "Chain[t.List[SupportsRound[NumberT]]]", precision: int = 0 ) -> "Chain[t.List[float]]": ... @t.overload def round_(self: "Chain[SupportsRound[NumberT]]", precision: int = 0) -> "Chain[float]": ... def round_(self, precision=0): return self._wrap(pyd.round_)(precision) round = round_ @t.overload def scale( self: "Chain[t.Iterable['Decimal']]", maximum: "Decimal" ) -> "Chain[t.List['Decimal']]": ... @t.overload def scale( self: "Chain[t.Iterable[NumberNoDecimalT]]", maximum: NumberNoDecimalT ) -> "Chain[t.List[float]]": ... @t.overload def scale(self: "Chain[t.Iterable[NumberT]]", maximum: int = 1) -> "Chain[t.List[float]]": ... def scale(self, maximum: NumberT = 1): return self._wrap(pyd.scale)(maximum) @t.overload def slope( self: "Chain[t.Union[t.Tuple['Decimal', 'Decimal'], t.List['Decimal']]]", point2: t.Union[t.Tuple["Decimal", "Decimal"], t.List["Decimal"]], ) -> "Chain['Decimal']": ... @t.overload def slope( self: "Chain[t.Union[t.Tuple[NumberNoDecimalT, NumberNoDecimalT], t.List[NumberNoDecimalT]]]", point2: t.Union[t.Tuple[NumberNoDecimalT, NumberNoDecimalT], t.List[NumberNoDecimalT]], ) -> "Chain[float]": ... def slope(self, point2): return self._wrap(pyd.slope)(point2) def std_deviation(self: "Chain[t.List[NumberT]]") -> "Chain[float]": return self._wrap(pyd.std_deviation)() @t.overload def subtract(self: "Chain['SupportsSub[T, T2]']", subtrahend: T) -> "Chain[T2]": ... @t.overload def subtract(self: "Chain[T]", subtrahend: "SupportsSub[T, T2]") -> "Chain[T2]": ... def subtract(self, subtrahend): return self._wrap(pyd.subtract)(subtrahend) def transpose(self: "Chain[t.Iterable[t.Iterable[T]]]") -> "Chain[t.List[t.List[T]]]": return self._wrap(pyd.transpose)() @t.overload def variance(self: "Chain[t.Mapping[t.Any, 'SupportsAdd[int, t.Any]']]") -> "Chain[float]": ... @t.overload def variance(self: "Chain[t.Iterable['SupportsAdd[int, t.Any]']]") -> "Chain[float]": ... def variance(self): return self._wrap(pyd.variance)() @t.overload def zscore( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], NumberT] ) -> "Chain[t.List[float]]": ... @t.overload def zscore( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], NumberT] ) -> "Chain[t.List[float]]": ... @t.overload def zscore( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], NumberT] ) -> "Chain[t.List[float]]": ... @t.overload def zscore( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], NumberT] ) -> "Chain[t.List[float]]": ... 
@t.overload def zscore( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], NumberT] ) -> "Chain[t.List[float]]": ... @t.overload def zscore( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], NumberT] ) -> "Chain[t.List[float]]": ... @t.overload def zscore( self: "Chain[t.Iterable[NumberT]]", iteratee: None = None ) -> "Chain[t.List[float]]": ... def zscore(self, iteratee=None): return self._wrap(pyd.zscore)(iteratee) @t.overload def assign( self: "Chain[t.Mapping[T, T2]]", *sources: t.Mapping[T3, T4] ) -> "Chain[t.Dict[t.Union[T, T3], t.Union[T2, T4]]]": ... @t.overload def assign( self: "Chain[t.Union[t.Tuple[T, ...], t.List[T]]]", *sources: t.Mapping[int, T2] ) -> "Chain[t.List[t.Union[T, T2]]]": ... def assign(self, *sources) -> "Chain[t.Union[t.List[t.Any], t.Dict[t.Any, t.Any]]]": return self._wrap(pyd.assign)(*sources) @t.overload def assign_with( self: "Chain[t.Mapping[T, T2]]", *sources: t.Mapping[T3, t.Any], customizer: t.Callable[[t.Union[T2, None]], T5], ) -> "Chain[t.Dict[t.Union[T, T3], t.Union[T2, T5]]]": ... @t.overload def assign_with( self: "Chain[t.Mapping[T, T2]]", *sources: t.Mapping[T3, T4], customizer: t.Callable[[t.Union[T2, None], T4], T5], ) -> "Chain[t.Dict[t.Union[T, T3], t.Union[T2, T5]]]": ... @t.overload def assign_with( self: "Chain[t.Mapping[T, T2]]", *sources: t.Mapping[T3, T4], customizer: t.Callable[[t.Union[T2, None], T4, T3], T5], ) -> "Chain[t.Dict[t.Union[T, T3], t.Union[T2, T5]]]": ... @t.overload def assign_with( self: "Chain[t.Mapping[T, T2]]", *sources: t.Mapping[T3, T4], customizer: t.Callable[[t.Union[T2, None], T4, T3, t.Dict[T, T2]], T5], ) -> "Chain[t.Dict[t.Union[T, T3], t.Union[T2, T5]]]": ... @t.overload def assign_with( self: "Chain[t.Mapping[T, T2]]", *sources: t.Mapping[T3, T4], customizer: t.Callable[[t.Union[T2, None], T4, T3, t.Dict[T, T2], t.Dict[T3, T4]], T5], ) -> "Chain[t.Dict[t.Union[T, T3], t.Union[T2, T5]]]": ... @t.overload def assign_with( self: "Chain[t.Mapping[T, T2]]", *sources: t.Mapping[T3, T4], customizer: None = None ) -> "Chain[t.Dict[t.Union[T, T3], t.Union[T2, T4]]]": ... def assign_with(self, *sources, customizer=None): return self._wrap(pyd.assign_with)(*sources, customizer=customizer) @t.overload def callables( self: "Chain[t.Mapping['SupportsRichComparisonT', t.Any]]", ) -> "Chain[t.List['SupportsRichComparisonT']]": ... @t.overload def callables(self: "Chain[t.Iterable[T]]") -> "Chain[t.List[T]]": ... def callables(self): return self._wrap(pyd.callables)() def clone(self: "Chain[T]") -> "Chain[T]": return self._wrap(pyd.clone)() @t.overload def clone_with( self: "Chain[t.Mapping[T, T2]]", customizer: t.Callable[[T2, T, t.Mapping[T, T2]], T3] ) -> "Chain[t.Dict[T, t.Union[T2, T3]]]": ... @t.overload def clone_with( self: "Chain[t.Mapping[T, T2]]", customizer: t.Callable[[T2, T], T3] ) -> "Chain[t.Dict[T, t.Union[T2, T3]]]": ... @t.overload def clone_with( self: "Chain[t.Mapping[T, T2]]", customizer: t.Callable[[T2], T3] ) -> "Chain[t.Dict[T, t.Union[T2, T3]]]": ... @t.overload def clone_with( self: "Chain[t.List[T]]", customizer: t.Callable[[T, int, t.List[T]], T2] ) -> "Chain[t.List[t.Union[T, T2]]]": ... @t.overload def clone_with( self: "Chain[t.List[T]]", customizer: t.Callable[[T, int], T2] ) -> "Chain[t.List[t.Union[T, T2]]]": ... @t.overload def clone_with( self: "Chain[t.List[T]]", customizer: t.Callable[[T], T2] ) -> "Chain[t.List[t.Union[T, T2]]]": ... @t.overload def clone_with(self: "Chain[T]", customizer: None = None) -> "Chain[T]": ... 
@t.overload def clone_with(self: "Chain[t.Any]", customizer: t.Callable[..., t.Any]) -> "Chain[t.Any]": ... def clone_with(self, customizer=None): return self._wrap(pyd.clone_with)(customizer) def clone_deep(self: "Chain[T]") -> "Chain[T]": return self._wrap(pyd.clone_deep)() @t.overload def clone_deep_with( self: "Chain[t.Mapping[T, T2]]", customizer: t.Callable[[T2, T, t.Mapping[T, T2]], T3] ) -> "Chain[t.Dict[T, t.Union[T2, T3]]]": ... @t.overload def clone_deep_with( self: "Chain[t.Mapping[T, T2]]", customizer: t.Callable[[T2, T], T3] ) -> "Chain[t.Dict[T, t.Union[T2, T3]]]": ... @t.overload def clone_deep_with( self: "Chain[t.Mapping[T, T2]]", customizer: t.Callable[[T2], T3] ) -> "Chain[t.Dict[T, t.Union[T2, T3]]]": ... @t.overload def clone_deep_with( self: "Chain[t.List[T]]", customizer: t.Callable[[T, int, t.List[T]], T2] ) -> "Chain[t.List[t.Union[T, T2]]]": ... @t.overload def clone_deep_with( self: "Chain[t.List[T]]", customizer: t.Callable[[T, int], T2] ) -> "Chain[t.List[t.Union[T, T2]]]": ... @t.overload def clone_deep_with( self: "Chain[t.List[T]]", customizer: t.Callable[[T], T2] ) -> "Chain[t.List[t.Union[T, T2]]]": ... @t.overload def clone_deep_with(self: "Chain[T]", customizer: None = None) -> "Chain[T]": ... @t.overload def clone_deep_with( self: "Chain[t.Any]", customizer: t.Callable[..., t.Any] ) -> "Chain[t.Any]": ... def clone_deep_with(self, customizer=None): return self._wrap(pyd.clone_deep_with)(customizer) def defaults( self: "Chain[t.Dict[T, T2]]", *sources: t.Dict[T3, T4] ) -> "Chain[t.Dict[t.Union[T, T3], t.Union[T2, T4]]]": return self._wrap(pyd.defaults)(*sources) def defaults_deep( self: "Chain[t.Dict[T, T2]]", *sources: t.Dict[T3, T4] ) -> "Chain[t.Dict[t.Union[T, T3], t.Union[T2, T4]]]": return self._wrap(pyd.defaults_deep)(*sources) @t.overload def find_key( self: "Chain[t.Mapping[T, T2]]", predicate: t.Callable[[T2, T, t.Dict[T, T2]], t.Any] ) -> "Chain[t.Union[T, None]]": ... @t.overload def find_key( self: "Chain[t.Mapping[T, T2]]", predicate: t.Callable[[T2, T], t.Any] ) -> "Chain[t.Union[T, None]]": ... @t.overload def find_key( self: "Chain[t.Mapping[T, T2]]", predicate: t.Callable[[T2], t.Any] ) -> "Chain[t.Union[T, None]]": ... @t.overload def find_key( self: "Chain[t.Mapping[T, t.Any]]", predicate: None = None ) -> "Chain[t.Union[T, None]]": ... @t.overload def find_key( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[t.Union[int, None]]": ... @t.overload def find_key( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], t.Any] ) -> "Chain[t.Union[int, None]]": ... @t.overload def find_key( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], t.Any] ) -> "Chain[t.Union[int, None]]": ... @t.overload def find_key( self: "Chain[t.Iterable[t.Any]]", iteratee: None = None ) -> "Chain[t.Union[int, None]]": ... def find_key(self, predicate=None): return self._wrap(pyd.find_key)(predicate) @t.overload def find_last_key( self: "Chain[t.Mapping[T, T2]]", predicate: t.Callable[[T2, T, t.Dict[T, T2]], t.Any] ) -> "Chain[t.Union[T, None]]": ... @t.overload def find_last_key( self: "Chain[t.Mapping[T, T2]]", predicate: t.Callable[[T2, T], t.Any] ) -> "Chain[t.Union[T, None]]": ... @t.overload def find_last_key( self: "Chain[t.Mapping[T, T2]]", predicate: t.Callable[[T2], t.Any] ) -> "Chain[t.Union[T, None]]": ... @t.overload def find_last_key( self: "Chain[t.Mapping[T, t.Any]]", predicate: None = None ) -> "Chain[t.Union[T, None]]": ... 
@t.overload def find_last_key( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[t.Union[int, None]]": ... @t.overload def find_last_key( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], t.Any] ) -> "Chain[t.Union[int, None]]": ... @t.overload def find_last_key( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], t.Any] ) -> "Chain[t.Union[int, None]]": ... @t.overload def find_last_key( self: "Chain[t.Iterable[t.Any]]", iteratee: None = None ) -> "Chain[t.Union[int, None]]": ... def find_last_key(self, predicate=None): return self._wrap(pyd.find_last_key)(predicate) @t.overload def for_in( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], t.Any] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_in( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], t.Any] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_in( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2], t.Any] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_in( self: "Chain[t.Mapping[T, T2]]", iteratee: None = None ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_in( self: "Chain[t.Sequence[T]]", iteratee: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def for_in( self: "Chain[t.Sequence[T]]", iteratee: t.Callable[[T, int], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def for_in( self: "Chain[t.Sequence[T]]", iteratee: t.Callable[[T], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def for_in(self: "Chain[t.Sequence[T]]", iteratee: None = None) -> "Chain[t.List[T]]": ... def for_in(self, iteratee=None): return self._wrap(pyd.for_in)(iteratee) @t.overload def for_in_right( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], t.Any] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_in_right( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], t.Any] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_in_right( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2], t.Any] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_in_right( self: "Chain[t.Mapping[T, T2]]", iteratee: None = None ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def for_in_right( self: "Chain[t.Sequence[T]]", iteratee: t.Callable[[T, int, t.List[T]], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def for_in_right( self: "Chain[t.Sequence[T]]", iteratee: t.Callable[[T, int], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def for_in_right( self: "Chain[t.Sequence[T]]", iteratee: t.Callable[[T], t.Any] ) -> "Chain[t.List[T]]": ... @t.overload def for_in_right(self: "Chain[t.Sequence[T]]", iteratee: None = None) -> "Chain[t.List[T]]": ... def for_in_right(self, iteratee=None): return self._wrap(pyd.for_in_right)(iteratee) @t.overload def get(self: "Chain[t.List[T]]", path: int, default: T2) -> "Chain[t.Union[T, T2]]": ... @t.overload def get( self: "Chain[t.List[T]]", path: int, default: None = None ) -> "Chain[t.Union[T, None]]": ... @t.overload def get(self: "Chain[t.Any]", path: PathT, default: t.Any = None) -> "Chain[t.Any]": ... def get(self: "Chain[t.Any]", path: PathT, default: t.Any = None) -> "Chain[t.Any]": return self._wrap(pyd.get)(path, default) def has(self: "Chain[t.Any]", path: PathT) -> "Chain[bool]": return self._wrap(pyd.has)(path) @t.overload def invert(self: "Chain[t.Mapping[T, T2]]") -> "Chain[t.Dict[T2, T]]": ... @t.overload def invert(self: "Chain[t.Union[t.Iterator[T], t.Sequence[T]]]") -> "Chain[t.Dict[T, int]]": ... 
def invert(self): return self._wrap(pyd.invert)() @t.overload def invert_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2], T3] ) -> "Chain[t.Dict[T3, t.List[T]]]": ... @t.overload def invert_by( self: "Chain[t.Mapping[T, T2]]", iteratee: None = None ) -> "Chain[t.Dict[T2, t.List[T]]]": ... @t.overload def invert_by( self: "Chain[t.Union[t.Iterator[T], t.Sequence[T]]]", iteratee: t.Callable[[T], T2] ) -> "Chain[t.Dict[T2, t.List[int]]]": ... @t.overload def invert_by( self: "Chain[t.Union[t.Iterator[T], t.Sequence[T]]]", iteratee: None = None ) -> "Chain[t.Dict[T, t.List[int]]]": ... def invert_by(self, iteratee=None): return self._wrap(pyd.invert_by)(iteratee) def invoke(self: "Chain[t.Any]", path: PathT, *args: t.Any, **kwargs: t.Any) -> "Chain[t.Any]": return self._wrap(pyd.invoke)(path, *args, **kwargs) @t.overload def keys(self: "Chain[t.Iterable[T]]") -> "Chain[t.List[T]]": ... @t.overload def keys(self: "Chain[t.Any]") -> "Chain[t.List[t.Any]]": ... def keys(self): return self._wrap(pyd.keys)() @t.overload def map_keys( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], T3] ) -> "Chain[t.Dict[T3, T2]]": ... @t.overload def map_keys( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], T3] ) -> "Chain[t.Dict[T3, T2]]": ... @t.overload def map_keys( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T2], T3] ) -> "Chain[t.Dict[T3, T2]]": ... @t.overload def map_keys( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> "Chain[t.Dict[T2, T]]": ... @t.overload def map_keys( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], T2] ) -> "Chain[t.Dict[T2, T]]": ... @t.overload def map_keys( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], T2] ) -> "Chain[t.Dict[T2, T]]": ... @t.overload def map_keys( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Union[IterateeObjT, None] = None ) -> "Chain[t.Dict[t.Any, t.Any]]": ... def map_keys(self, iteratee=None): return self._wrap(pyd.map_keys)(iteratee) @t.overload def map_values( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T, t.Dict[T, T2]], T3] ) -> "Chain[t.Dict[T, T3]]": ... @t.overload def map_values( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], T3] ) -> "Chain[t.Dict[T, T3]]": ... @t.overload def map_values( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2], T3] ) -> "Chain[t.Dict[T, T3]]": ... @t.overload def map_values( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def map_values( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T, int], T2] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def map_values( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T], T2] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def map_values( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Union[IterateeObjT, None] = None ) -> "Chain[t.Dict[t.Any, t.Any]]": ... 
def map_values(self, iteratee=None): return self._wrap(pyd.map_values)(iteratee) def map_values_deep( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Union[t.Callable[..., t.Any], None] = None, property_path: t.Any = UNSET, ) -> "Chain[t.Any]": return self._wrap(pyd.map_values_deep)(iteratee, property_path) def apply(self: "Chain[T]", func: t.Callable[[T], T2]) -> "Chain[T2]": return self._wrap(pyd.apply)(func) def apply_if( self: "Chain[T]", func: t.Callable[[T], T2], predicate: t.Callable[[T], bool] ) -> "Chain[t.Union[T, T2]]": return self._wrap(pyd.apply_if)(func, predicate) def apply_if_not_none( self: "Chain[t.Optional[T]]", func: t.Callable[[T], T2] ) -> "Chain[t.Optional[T2]]": return self._wrap(pyd.apply_if_not_none)(func) @t.overload def apply_catch( self: "Chain[T]", func: t.Callable[[T], T2], exceptions: t.Iterable[t.Type[Exception]], default: T3, ) -> "Chain[t.Union[T2, T3]]": ... @t.overload def apply_catch( self: "Chain[T]", func: t.Callable[[T], T2], exceptions: t.Iterable[t.Type[Exception]], default: Unset = UNSET, ) -> "Chain[t.Union[T, T2]]": ... def apply_catch(self, func, exceptions, default=UNSET): return self._wrap(pyd.apply_catch)(func, exceptions, default) @t.overload def merge( self: "Chain[t.Mapping[T, T2]]", *sources: t.Mapping[T3, T4] ) -> "Chain[t.Dict[t.Union[T, T3], t.Union[T2, T4]]]": ... @t.overload def merge( self: "Chain[t.Sequence[T]]", *sources: t.Sequence[T2] ) -> "Chain[t.List[t.Union[T, T2]]]": ... def merge(self, *sources): return self._wrap(pyd.merge)(*sources) def merge_with(self: "Chain[t.Any]", *sources: t.Any, **kwargs: t.Any) -> "Chain[t.Any]": return self._wrap(pyd.merge_with)(*sources, **kwargs) @t.overload def omit(self: "Chain[t.Mapping[T, T2]]", *properties: PathT) -> "Chain[t.Dict[T, T2]]": ... @t.overload def omit( self: "Chain[t.Union[t.Iterator[T], t.Sequence[T]]]", *properties: PathT ) -> "Chain[t.Dict[int, T]]": ... @t.overload def omit(self: "Chain[t.Any]", *properties: PathT) -> "Chain[t.Dict[t.Any, t.Any]]": ... def omit(self, *properties): return self._wrap(pyd.omit)(*properties) @t.overload def omit_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], t.Any] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def omit_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2], t.Any] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def omit_by(self: "Chain[t.Dict[T, T2]]", iteratee: None = None) -> "Chain[t.Dict[T, T2]]": ... @t.overload def omit_by( self: "Chain[t.Union[t.Iterator[T], t.Sequence[T]]]", iteratee: t.Callable[[T, int], t.Any] ) -> "Chain[t.Dict[int, T]]": ... @t.overload def omit_by( self: "Chain[t.Union[t.Iterator[T], t.Sequence[T]]]", iteratee: t.Callable[[T], t.Any] ) -> "Chain[t.Dict[int, T]]": ... @t.overload def omit_by(self: "Chain[t.List[T]]", iteratee: None = None) -> "Chain[t.Dict[int, T]]": ... @t.overload def omit_by( self: "Chain[t.Any]", iteratee: t.Union[t.Callable[..., t.Any], None] = None ) -> "Chain[t.Dict[t.Any, t.Any]]": ... def omit_by(self, iteratee=None): return self._wrap(pyd.omit_by)(iteratee) def parse_int( self: "Chain[t.Any]", radix: t.Union[int, None] = None ) -> "Chain[t.Union[int, None]]": return self._wrap(pyd.parse_int)(radix) @t.overload def pick(self: "Chain[t.Mapping[T, T2]]", *properties: PathT) -> "Chain[t.Dict[T, T2]]": ... @t.overload def pick( self: "Chain[t.Union[t.Tuple[T, ...], t.List[T]]]", *properties: PathT ) -> "Chain[t.Dict[int, T]]": ... @t.overload def pick(self: "Chain[t.Any]", *properties: PathT) -> "Chain[t.Dict[t.Any, t.Any]]": ... 
def pick(self, *properties): return self._wrap(pyd.pick)(*properties) @t.overload def pick_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2], t.Any] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def pick_by( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T2, T], t.Any] ) -> "Chain[t.Dict[T, T2]]": ... @t.overload def pick_by(self: "Chain[t.Dict[T, T2]]", iteratee: None = None) -> "Chain[t.Dict[T, T2]]": ... @t.overload def pick_by( self: "Chain[t.Union[t.Tuple[T, ...], t.List[T]]]", iteratee: t.Callable[[T, int], t.Any] ) -> "Chain[t.Dict[int, T]]": ... @t.overload def pick_by( self: "Chain[t.Union[t.Tuple[T, ...], t.List[T]]]", iteratee: t.Callable[[T], t.Any] ) -> "Chain[t.Dict[int, T]]": ... @t.overload def pick_by( self: "Chain[t.Union[t.Tuple[T, ...], t.List[T]]]", iteratee: None = None ) -> "Chain[t.Dict[int, T]]": ... @t.overload def pick_by( self: "Chain[t.Any]", iteratee: t.Union[t.Callable[..., t.Any], None] = None ) -> "Chain[t.Dict[t.Any, t.Any]]": ... def pick_by(self, iteratee=None): return self._wrap(pyd.pick_by)(iteratee) def rename_keys( self: "Chain[t.Dict[T, T2]]", key_map: t.Dict[t.Any, T3] ) -> "Chain[t.Dict[t.Union[T, T3], T2]]": return self._wrap(pyd.rename_keys)(key_map) def set_(self: "Chain[T]", path: PathT, value: t.Any) -> "Chain[T]": return self._wrap(pyd.set_)(path, value) set = set_ def set_with( self: "Chain[T]", path: PathT, value: t.Any, customizer: t.Union[t.Callable[..., t.Any], None] = None, ) -> "Chain[T]": return self._wrap(pyd.set_with)(path, value, customizer) def to_boolean( self: "Chain[t.Any]", true_values: t.Tuple[str, ...] = ("true", "1"), false_values: t.Tuple[str, ...] = ("false", "0"), ) -> "Chain[t.Union[bool, None]]": return self._wrap(pyd.to_boolean)(true_values, false_values) @t.overload def to_dict(self: "Chain[t.Mapping[T, T2]]") -> "Chain[t.Dict[T, T2]]": ... @t.overload def to_dict( self: "Chain[t.Union[t.Iterator[T], t.Sequence[T]]]", ) -> "Chain[t.Dict[int, T]]": ... @t.overload def to_dict(self: "Chain[t.Any]") -> "Chain[t.Dict[t.Any, t.Any]]": ... def to_dict(self): return self._wrap(pyd.to_dict)() def to_integer(self: "Chain[t.Any]") -> "Chain[int]": return self._wrap(pyd.to_integer)() @t.overload def to_list( self: "Chain[t.Dict[t.Any, T]]", split_strings: bool = True ) -> "Chain[t.List[T]]": ... @t.overload def to_list(self: "Chain[t.Iterable[T]]", split_strings: bool = True) -> "Chain[t.List[T]]": ... @t.overload def to_list(self: "Chain[T]", split_strings: bool = True) -> "Chain[t.List[T]]": ... def to_list(self, split_strings=True): return self._wrap(pyd.to_list)(split_strings) def to_number(self: "Chain[t.Any]", precision: int = 0) -> "Chain[t.Union[float, None]]": return self._wrap(pyd.to_number)(precision) @t.overload def to_pairs(self: "Chain[t.Mapping[T, T2]]") -> "Chain[t.List[t.Tuple[T, T2]]]": ... @t.overload def to_pairs( self: "Chain[t.Union[t.Iterator[T], t.Sequence[T]]]", ) -> "Chain[t.List[t.Tuple[int, T]]]": ... @t.overload def to_pairs(self: "Chain[t.Any]") -> "Chain[t.List[t.Any]]": ... def to_pairs(self): return self._wrap(pyd.to_pairs)() @t.overload def transform( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T3, T2, T, t.Dict[T, T2]], t.Any], accumulator: T3, ) -> "Chain[T3]": ... @t.overload def transform( self: "Chain[t.Mapping[T, T2]]", iteratee: t.Callable[[T3, T2, T], t.Any], accumulator: T3 ) -> "Chain[T3]": ... @t.overload def transform( self: "Chain[t.Mapping[t.Any, T2]]", iteratee: t.Callable[[T3, T2], t.Any], accumulator: T3 ) -> "Chain[T3]": ... 
@t.overload def transform( self: "Chain[t.Mapping[t.Any, t.Any]]", iteratee: t.Callable[[T3], t.Any], accumulator: T3 ) -> "Chain[T3]": ... @t.overload def transform( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T3, T, int, t.List[T]], t.Any], accumulator: T3, ) -> "Chain[T3]": ... @t.overload def transform( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T3, T, int], t.Any], accumulator: T3 ) -> "Chain[T3]": ... @t.overload def transform( self: "Chain[t.Iterable[T]]", iteratee: t.Callable[[T3, T], t.Any], accumulator: T3 ) -> "Chain[T3]": ... @t.overload def transform( self: "Chain[t.Iterable[t.Any]]", iteratee: t.Callable[[T3], t.Any], accumulator: T3 ) -> "Chain[T3]": ... @t.overload def transform( self: "Chain[t.Any]", iteratee: t.Any = None, accumulator: t.Any = None ) -> "Chain[t.Any]": ... def transform(self, iteratee=None, accumulator=None): return self._wrap(pyd.transform)(iteratee, accumulator) @t.overload def update( self: "Chain[t.Dict[t.Any, T2]]", path: PathT, updater: t.Callable[[T2], t.Any] ) -> "Chain[t.Dict[t.Any, t.Any]]": ... @t.overload def update( self: "Chain[t.List[T]]", path: PathT, updater: t.Callable[[T], t.Any] ) -> "Chain[t.List[t.Any]]": ... @t.overload def update(self: "Chain[T]", path: PathT, updater: t.Callable[..., t.Any]) -> "Chain[T]": ... def update(self, path, updater): return self._wrap(pyd.update)(path, updater) @t.overload def update_with( self: "Chain[t.Dict[t.Any, T2]]", path: PathT, updater: t.Callable[[T2], t.Any], customizer: t.Union[t.Callable[..., t.Any], None], ) -> "Chain[t.Dict[t.Any, t.Any]]": ... @t.overload def update_with( self: "Chain[t.List[T]]", path: PathT, updater: t.Callable[[T], t.Any], customizer: t.Union[t.Callable[..., t.Any], None] = None, ) -> "Chain[t.List[t.Any]]": ... @t.overload def update_with( self: "Chain[T]", path: PathT, updater: t.Callable[..., t.Any], customizer: t.Union[t.Callable[..., t.Any], None] = None, ) -> "Chain[T]": ... def update_with(self, path, updater, customizer=None): return self._wrap(pyd.update_with)(path, updater, customizer) def unset( self: "Chain[t.Union[t.List[t.Any], t.Dict[t.Any, t.Any]]]", path: PathT ) -> "Chain[bool]": return self._wrap(pyd.unset)(path) @t.overload def values(self: "Chain[t.Mapping[t.Any, T2]]") -> "Chain[t.List[T2]]": ... @t.overload def values(self: "Chain[t.Iterable[T]]") -> "Chain[t.List[T]]": ... @t.overload def values(self: "Chain[t.Any]") -> "Chain[t.List[t.Any]]": ... 
def values(self): return self._wrap(pyd.values)() def eq(self: "Chain[t.Any]", other: t.Any) -> "Chain[bool]": return self._wrap(pyd.eq)(other) def eq_cmp(self: "Chain[T]") -> "Chain[t.Callable[[T], bool]]": return self._wrap(pyd.eq_cmp)() def gt(self: "Chain['SupportsDunderGT[T]']", other: T) -> "Chain[bool]": return self._wrap(pyd.gt)(other) def gt_cmp(self: "Chain[T]") -> "Chain[t.Callable[['SupportsDunderGT[T]'], bool]]": return self._wrap(pyd.gt_cmp)() def gte(self: "Chain['SupportsDunderGE[T]']", other: T) -> "Chain[bool]": return self._wrap(pyd.gte)(other) def gte_cmp(self: "Chain[T]") -> "Chain[t.Callable[['SupportsDunderGE[T]'], bool]]": return self._wrap(pyd.gte_cmp)() def lt(self: "Chain['SupportsDunderLT[T]']", other: T) -> "Chain[bool]": return self._wrap(pyd.lt)(other) def lt_cmp(self: "Chain[T]") -> "Chain[t.Callable[['SupportsDunderLT[T]'], bool]]": return self._wrap(pyd.lt_cmp)() def lte(self: "Chain['SupportsDunderLE[T]']", other: T) -> "Chain[bool]": return self._wrap(pyd.lte)(other) def lte_cmp(self: "Chain[T]") -> "Chain[t.Callable[['SupportsDunderLE[T]'], bool]]": return self._wrap(pyd.lte_cmp)() def in_range(self: "Chain[t.Any]", start: t.Any = 0, end: t.Any = None) -> "Chain[bool]": return self._wrap(pyd.in_range)(start, end) def in_range_cmp(self: "Chain[t.Any]", end: t.Any = None) -> "Chain[t.Callable[[t.Any], bool]]": return self._wrap(pyd.in_range_cmp)(end) def is_associative(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_associative)() def is_blank(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_blank)() def is_boolean(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_boolean)() def is_builtin(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_builtin)() def is_date(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_date)() def is_decreasing( self: "Chain[t.Union['SupportsRichComparison', t.List['SupportsRichComparison']]]", ) -> "Chain[bool]": return self._wrap(pyd.is_decreasing)() def is_dict(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_dict)() def is_empty(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_empty)() def is_equal(self: "Chain[t.Any]", other: t.Any) -> "Chain[bool]": return self._wrap(pyd.is_equal)(other) def is_equal_cmp(self: "Chain[T]") -> "Chain[t.Callable[[T], bool]]": return self._wrap(pyd.is_equal_cmp)() @t.overload def is_equal_with( self: "Chain[T]", other: T2, customizer: t.Callable[[T, T2], T3] ) -> "Chain[T3]": ... @t.overload def is_equal_with( self: "Chain[t.Any]", other: t.Any, customizer: t.Callable[..., t.Any] ) -> "Chain[bool]": ... @t.overload def is_equal_with(self: "Chain[t.Any]", other: t.Any, customizer: None) -> "Chain[bool]": ... 
def is_equal_with(self, other, customizer): return self._wrap(pyd.is_equal_with)(other, customizer) def is_equal_with_cmp( self: "Chain[T]", customizer: t.Callable[[T, T], T3] ) -> "Chain[t.Callable[[T], T3]]": return self._wrap(pyd.is_equal_with_cmp)(customizer) def is_error(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_error)() def is_even(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_even)() def is_float(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_float)() def is_function(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_function)() def is_increasing( self: "Chain[t.Union['SupportsRichComparison', t.List['SupportsRichComparison']]]", ) -> "Chain[bool]": return self._wrap(pyd.is_increasing)() def is_indexed(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_indexed)() def is_instance_of( self: "Chain[t.Any]", types: t.Union[type, t.Tuple[type, ...]] ) -> "Chain[bool]": return self._wrap(pyd.is_instance_of)(types) def is_instance_of_cmp( self: "Chain[t.Union[type, t.Tuple[type, ...]]]", ) -> "Chain[t.Callable[[t.Any], bool]]": return self._wrap(pyd.is_instance_of_cmp)() def is_integer(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_integer)() def is_iterable(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_iterable)() def is_json(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_json)() def is_list(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_list)() def is_match(self: "Chain[t.Any]", source: t.Any) -> "Chain[bool]": return self._wrap(pyd.is_match)(source) def is_match_cmp(self: "Chain[t.Any]") -> "Chain[t.Callable[[t.Any], bool]]": return self._wrap(pyd.is_match_cmp)() def is_match_with( self: "Chain[t.Any]", source: t.Any, customizer: t.Any = None, _key: t.Any = UNSET, _obj: t.Any = UNSET, _source: t.Any = UNSET, ) -> "Chain[bool]": return self._wrap(pyd.is_match_with)(source, customizer, _key, _obj, _source) def is_match_with_cmp( self: "Chain[t.Any]", customizer: t.Any = None ) -> "Chain[t.Callable[[t.Any], bool]]": return self._wrap(pyd.is_match_with_cmp)(customizer) def is_monotone( self: "Chain[t.Union[T, t.List[T]]]", op: t.Callable[[T, T], t.Any] ) -> "Chain[bool]": return self._wrap(pyd.is_monotone)(op) def is_monotone_cmp( self: "Chain[t.Callable[[T, T], t.Any]]", ) -> "Chain[t.Callable[[t.Union[T, t.List[T]]], bool]]": return self._wrap(pyd.is_monotone_cmp)() def is_nan(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_nan)() def is_negative(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_negative)() def is_none(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_none)() def is_number(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_number)() def is_object(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_object)() def is_odd(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_odd)() def is_positive(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_positive)() def is_reg_exp(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_reg_exp)() def is_set(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_set)() def is_strictly_decreasing( self: "Chain[t.Union['SupportsRichComparison', t.List['SupportsRichComparison']]]", ) -> "Chain[bool]": return self._wrap(pyd.is_strictly_decreasing)() def is_strictly_increasing( self: "Chain[t.Union['SupportsRichComparison', t.List['SupportsRichComparison']]]", ) -> 
"Chain[bool]": return self._wrap(pyd.is_strictly_increasing)() def is_string(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_string)() def is_tuple(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_tuple)() def is_zero(self: "Chain[t.Any]") -> "Chain[bool]": return self._wrap(pyd.is_zero)() def camel_case(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.camel_case)() def capitalize(self: "Chain[t.Any]", strict: bool = True) -> "Chain[str]": return self._wrap(pyd.capitalize)(strict) def chars(self: "Chain[t.Any]") -> "Chain[t.List[str]]": return self._wrap(pyd.chars)() def chop(self: "Chain[t.Any]", step: int) -> "Chain[t.List[str]]": return self._wrap(pyd.chop)(step) def chop_right(self: "Chain[t.Any]", step: int) -> "Chain[t.List[str]]": return self._wrap(pyd.chop_right)(step) def clean(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.clean)() def count_substr(self: "Chain[t.Any]", subtext: t.Any) -> "Chain[int]": return self._wrap(pyd.count_substr)(subtext) def deburr(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.deburr)() def decapitalize(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.decapitalize)() def ends_with( self: "Chain[t.Any]", target: t.Any, position: t.Union[int, None] = None ) -> "Chain[bool]": return self._wrap(pyd.ends_with)(target, position) def ensure_ends_with(self: "Chain[t.Any]", suffix: t.Any) -> "Chain[str]": return self._wrap(pyd.ensure_ends_with)(suffix) def ensure_starts_with(self: "Chain[t.Any]", prefix: t.Any) -> "Chain[str]": return self._wrap(pyd.ensure_starts_with)(prefix) def escape(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.escape)() def escape_reg_exp(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.escape_reg_exp)() def has_substr(self: "Chain[t.Any]", subtext: t.Any) -> "Chain[bool]": return self._wrap(pyd.has_substr)(subtext) def human_case(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.human_case)() def insert_substr(self: "Chain[t.Any]", index: int, subtext: t.Any) -> "Chain[str]": return self._wrap(pyd.insert_substr)(index, subtext) def join(self: "Chain[t.Iterable[t.Any]]", separator: t.Any = "") -> "Chain[str]": return self._wrap(pyd.join)(separator) def kebab_case(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.kebab_case)() def lines(self: "Chain[t.Any]") -> "Chain[t.List[str]]": return self._wrap(pyd.lines)() def lower_case(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.lower_case)() def lower_first(self: "Chain[str]") -> "Chain[str]": return self._wrap(pyd.lower_first)() def number_format( self: "Chain[NumberT]", scale: int = 0, decimal_separator: str = ".", order_separator: str = ",", ) -> "Chain[str]": return self._wrap(pyd.number_format)(scale, decimal_separator, order_separator) def pad(self: "Chain[t.Any]", length: int, chars: t.Any = " ") -> "Chain[str]": return self._wrap(pyd.pad)(length, chars) def pad_end(self: "Chain[t.Any]", length: int, chars: t.Any = " ") -> "Chain[str]": return self._wrap(pyd.pad_end)(length, chars) def pad_start(self: "Chain[t.Any]", length: int, chars: t.Any = " ") -> "Chain[str]": return self._wrap(pyd.pad_start)(length, chars) def pascal_case(self: "Chain[t.Any]", strict: bool = True) -> "Chain[str]": return self._wrap(pyd.pascal_case)(strict) def predecessor(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.predecessor)() def prune(self: "Chain[t.Any]", length: int = 0, omission: str = "...") -> "Chain[str]": return 
self._wrap(pyd.prune)(length, omission) def quote(self: "Chain[t.Any]", quote_char: t.Any = '"') -> "Chain[str]": return self._wrap(pyd.quote)(quote_char) def reg_exp_js_match(self: "Chain[t.Any]", reg_exp: str) -> "Chain[t.List[str]]": return self._wrap(pyd.reg_exp_js_match)(reg_exp) def reg_exp_js_replace( self: "Chain[t.Any]", reg_exp: str, repl: t.Union[str, t.Callable[[re.Match[str]], str]] ) -> "Chain[str]": return self._wrap(pyd.reg_exp_js_replace)(reg_exp, repl) def reg_exp_replace( self: "Chain[t.Any]", pattern: t.Any, repl: t.Union[str, t.Callable[[re.Match[str]], str]], ignore_case: bool = False, count: int = 0, ) -> "Chain[str]": return self._wrap(pyd.reg_exp_replace)(pattern, repl, ignore_case, count) def repeat(self: "Chain[t.Any]", n: t.SupportsInt = 0) -> "Chain[str]": return self._wrap(pyd.repeat)(n) def replace( self: "Chain[t.Any]", pattern: t.Any, repl: t.Union[str, t.Callable[[re.Match[str]], str]], ignore_case: bool = False, count: int = 0, escape: bool = True, from_start: bool = False, from_end: bool = False, ) -> "Chain[str]": return self._wrap(pyd.replace)( pattern, repl, ignore_case, count, escape, from_start, from_end ) def replace_end( self: "Chain[t.Any]", pattern: t.Any, repl: t.Union[str, t.Callable[[re.Match[str]], str]], ignore_case: bool = False, escape: bool = True, ) -> "Chain[str]": return self._wrap(pyd.replace_end)(pattern, repl, ignore_case, escape) def replace_start( self: "Chain[t.Any]", pattern: t.Any, repl: t.Union[str, t.Callable[[re.Match[str]], str]], ignore_case: bool = False, escape: bool = True, ) -> "Chain[str]": return self._wrap(pyd.replace_start)(pattern, repl, ignore_case, escape) def separator_case(self: "Chain[t.Any]", separator: str) -> "Chain[str]": return self._wrap(pyd.separator_case)(separator) def series_phrase( self: "Chain[t.List[t.Any]]", separator: t.Any = ", ", last_separator: t.Any = " and ", serial: bool = False, ) -> "Chain[str]": return self._wrap(pyd.series_phrase)(separator, last_separator, serial) def series_phrase_serial( self: "Chain[t.List[t.Any]]", separator: t.Any = ", ", last_separator: t.Any = " and " ) -> "Chain[str]": return self._wrap(pyd.series_phrase_serial)(separator, last_separator) def slugify(self: "Chain[t.Any]", separator: str = "-") -> "Chain[str]": return self._wrap(pyd.slugify)(separator) def snake_case(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.snake_case)() def split( self: "Chain[t.Any]", separator: t.Union[str, Unset, None] = UNSET ) -> "Chain[t.List[str]]": return self._wrap(pyd.split)(separator) def start_case(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.start_case)() def starts_with(self: "Chain[t.Any]", target: t.Any, position: int = 0) -> "Chain[bool]": return self._wrap(pyd.starts_with)(target, position) def strip_tags(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.strip_tags)() def substr_left(self: "Chain[t.Any]", subtext: str) -> "Chain[str]": return self._wrap(pyd.substr_left)(subtext) def substr_left_end(self: "Chain[t.Any]", subtext: str) -> "Chain[str]": return self._wrap(pyd.substr_left_end)(subtext) def substr_right(self: "Chain[t.Any]", subtext: str) -> "Chain[str]": return self._wrap(pyd.substr_right)(subtext) def substr_right_end(self: "Chain[t.Any]", subtext: str) -> "Chain[str]": return self._wrap(pyd.substr_right_end)(subtext) def successor(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.successor)() def surround(self: "Chain[t.Any]", wrapper: t.Any) -> "Chain[str]": return self._wrap(pyd.surround)(wrapper) def 
swap_case(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.swap_case)() def title_case(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.title_case)() def to_lower(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.to_lower)() def to_upper(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.to_upper)() def trim(self: "Chain[t.Any]", chars: t.Union[str, None] = None) -> "Chain[str]": return self._wrap(pyd.trim)(chars) def trim_end(self: "Chain[t.Any]", chars: t.Union[str, None] = None) -> "Chain[str]": return self._wrap(pyd.trim_end)(chars) def trim_start(self: "Chain[t.Any]", chars: t.Union[str, None] = None) -> "Chain[str]": return self._wrap(pyd.trim_start)(chars) def truncate( self: "Chain[t.Any]", length: int = 30, omission: str = "...", separator: t.Union[str, re.Pattern[str], None] = None, ) -> "Chain[str]": return self._wrap(pyd.truncate)(length, omission, separator) def unescape(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.unescape)() def upper_case(self: "Chain[t.Any]") -> "Chain[str]": return self._wrap(pyd.upper_case)() def upper_first(self: "Chain[str]") -> "Chain[str]": return self._wrap(pyd.upper_first)() def unquote(self: "Chain[t.Any]", quote_char: t.Any = '"') -> "Chain[str]": return self._wrap(pyd.unquote)(quote_char) def url(self: "Chain[t.Any]", *paths: t.Any, **params: t.Any) -> "Chain[str]": return self._wrap(pyd.url)(*paths, **params) def words(self: "Chain[t.Any]", pattern: t.Union[str, None] = None) -> "Chain[t.List[str]]": return self._wrap(pyd.words)(pattern) def attempt( self: "Chain[t.Callable[P, T]]", *args: "P.args", **kwargs: "P.kwargs" ) -> "Chain[t.Union[T, Exception]]": return self._wrap(pyd.attempt)(*args, **kwargs) @t.overload def cond( self: "Chain[t.List[t.Tuple[t.Callable[P, t.Any], t.Callable[P, T]]]]", *extra_pairs: t.Tuple[t.Callable[P, t.Any], t.Callable[P, T]], ) -> "Chain[t.Callable[P, T]]": ... @t.overload def cond( self: "Chain[t.List[t.List[t.Callable[P, t.Any]]]]", *extra_pairs: t.List[t.Callable[P, t.Any]], ) -> "Chain[t.Callable[P, t.Any]]": ... def cond(self, *extra_pairs): return self._wrap(pyd.cond)(*extra_pairs) @t.overload def conforms( self: "Chain[t.Dict[T, t.Callable[[T2], t.Any]]]", ) -> "Chain[t.Callable[[t.Dict[T, T2]], bool]]": ... @t.overload def conforms( self: "Chain[t.List[t.Callable[[T], t.Any]]]", ) -> "Chain[t.Callable[[t.List[T]], bool]]": ... def conforms( self: "Chain[t.Union[t.List[t.Any], t.Dict[t.Any, t.Any]]]", ) -> "Chain[t.Callable[..., t.Any]]": return self._wrap(pyd.conforms)() @t.overload def conforms_to( self: "Chain[t.Dict[T, T2]]", source: t.Dict[T, t.Callable[[T2], t.Any]] ) -> "Chain[bool]": ... @t.overload def conforms_to( self: "Chain[t.List[T]]", source: t.List[t.Callable[[T], t.Any]] ) -> "Chain[bool]": ... def conforms_to(self, source): return self._wrap(pyd.conforms_to)(source) def constant(self: "Chain[T]") -> "Chain[t.Callable[..., T]]": return self._wrap(pyd.constant)() def default_to(self: "Chain[t.Union[T, None]]", default_value: T2) -> "Chain[t.Union[T, T2]]": return self._wrap(pyd.default_to)(default_value) @t.overload def default_to_any(self: "Chain[None]", *default_values: None) -> "Chain[None]": ... @t.overload def default_to_any( self: "Chain[t.Union[T, None]]", default_value1: None, default_value2: T2 ) -> "Chain[t.Union[T, T2]]": ... @t.overload def default_to_any( self: "Chain[t.Union[T, None]]", default_value1: None, default_value2: None, default_value3: T2, ) -> "Chain[t.Union[T, T2]]": ... 
@t.overload def default_to_any( self: "Chain[t.Union[T, None]]", default_value1: None, default_value2: None, default_value3: None, default_value4: T2, ) -> "Chain[t.Union[T, T2]]": ... @t.overload def default_to_any( self: "Chain[t.Union[T, None]]", default_value1: None, default_value2: None, default_value3: None, default_value4: None, default_value5: T2, ) -> "Chain[t.Union[T, T2]]": ... @t.overload def default_to_any( self: "Chain[t.Union[T, None]]", *default_values: T2 ) -> "Chain[t.Union[T, T2]]": ... def default_to_any(self, *default_values): return self._wrap(pyd.default_to_any)(*default_values) @t.overload def identity(self: "Chain[T]", *args: t.Any) -> "Chain[T]": ... @t.overload def iteratee(self: "Chain[t.Callable[P, T]]") -> "Chain[t.Callable[P, T]]": ... @t.overload def iteratee(self: "Chain[t.Any]") -> "Chain[t.Callable[..., t.Any]]": ... def iteratee(self): return self._wrap(pyd.iteratee)() def matches(self: "Chain[t.Any]") -> "Chain[t.Callable[[t.Any], bool]]": return self._wrap(pyd.matches)() def matches_property(self: "Chain[t.Any]", value: t.Any) -> "Chain[t.Callable[[t.Any], bool]]": return self._wrap(pyd.matches_property)(value) @t.overload def memoize( self: "Chain[t.Callable[P, T]]", resolver: None = None ) -> "Chain[MemoizedFunc[P, T, str]]": ... @t.overload def memoize( self: "Chain[t.Callable[P, T]]", resolver: t.Union[t.Callable[P, T2], None] = None ) -> "Chain[MemoizedFunc[P, T, T2]]": ... def memoize(self, resolver=None): return self._wrap(pyd.memoize)(resolver) def method( self: "Chain[PathT]", *args: t.Any, **kwargs: t.Any ) -> "Chain[t.Callable[..., t.Any]]": return self._wrap(pyd.method)(*args, **kwargs) def method_of( self: "Chain[t.Any]", *args: t.Any, **kwargs: t.Any ) -> "Chain[t.Callable[..., t.Any]]": return self._wrap(pyd.method_of)(*args, **kwargs) def noop(self: "Chain[t.Any]", *args: t.Any, **kwargs: t.Any) -> "Chain[None]": return self._wrap(pyd.noop)(*args, **kwargs) def over(self: "Chain[t.Iterable[t.Callable[P, T]]]") -> "Chain[t.Callable[P, t.List[T]]]": return self._wrap(pyd.over)() def over_every(self: "Chain[t.Iterable[t.Callable[P, t.Any]]]") -> "Chain[t.Callable[P, bool]]": return self._wrap(pyd.over_every)() def over_some(self: "Chain[t.Iterable[t.Callable[P, t.Any]]]") -> "Chain[t.Callable[P, bool]]": return self._wrap(pyd.over_some)() def property_(self: "Chain[PathT]") -> "Chain[t.Callable[[t.Any], t.Any]]": return self._wrap(pyd.property_)() property = property_ def properties(self: "Chain[t.Any]", *paths: t.Any) -> "Chain[t.Callable[[t.Any], t.Any]]": return self._wrap(pyd.properties)(*paths) def property_of(self: "Chain[t.Any]") -> "Chain[t.Callable[[PathT], t.Any]]": return self._wrap(pyd.property_of)() @t.overload def random( self: "Chain[int]", stop: int = 1, *, floating: Literal[False] = False ) -> "Chain[int]": ... @t.overload def random(self: "Chain[float]", stop: int = 1, floating: bool = False) -> "Chain[float]": ... @t.overload def random(self: "Chain[float]", stop: float, floating: bool = False) -> "Chain[float]": ... @t.overload def random( self: "Chain[t.Union[float, int]]", stop: t.Union[float, int] = 1, *, floating: Literal[True], ) -> "Chain[float]": ... def random( self: "Chain[t.Union[float, int]]", stop: t.Union[float, int] = 1, floating: bool = False ): return self._wrap(pyd.random)(stop, floating) @t.overload def range_(self: "Chain[int]") -> "Chain[t.Generator[int, None, None]]": ... @t.overload def range_( self: "Chain[int]", stop: int, step: int = 1 ) -> "Chain[t.Generator[int, None, None]]": ... 
def range_(self, *args): return self._wrap(pyd.range_)(*args) range = range_ @t.overload def range_right(self: "Chain[int]") -> "Chain[t.Generator[int, None, None]]": ... @t.overload def range_right( self: "Chain[int]", stop: int, step: int = 1 ) -> "Chain[t.Generator[int, None, None]]": ... def range_right(self, *args): return self._wrap(pyd.range_right)(*args) @t.overload def result(self: "Chain[None]", key: t.Any, default: None = None) -> "Chain[None]": ... @t.overload def result(self: "Chain[None]", key: t.Any, default: T) -> "Chain[T]": ... @t.overload def result(self: "Chain[t.Any]", key: t.Any, default: t.Any = None) -> "Chain[t.Any]": ... def result(self, key, default=None): return self._wrap(pyd.result)(key, default) def retry( self: "Chain[int]", delay: t.Union[int, float] = 0.5, max_delay: t.Union[int, float] = 150.0, scale: t.Union[int, float] = 2.0, jitter: t.Union[int, float, t.Tuple[t.Union[int, float], t.Union[int, float]]] = 0, exceptions: t.Iterable[Type[Exception]] = (Exception,), on_exception: t.Union[t.Callable[[Exception, int], t.Any], None] = None, ) -> "Chain[t.Callable[[CallableT], CallableT]]": return self._wrap(pyd.retry)(delay, max_delay, scale, jitter, exceptions, on_exception) @t.overload def times(self: "Chain[int]", iteratee: t.Callable[..., T]) -> "Chain[t.List[T]]": ... @t.overload def times(self: "Chain[int]", iteratee: None = None) -> "Chain[t.List[int]]": ... def times(self: "Chain[int]", iteratee=None): return self._wrap(pyd.times)(iteratee) def to_path(self: "Chain[PathT]") -> "Chain[t.List[t.Hashable]]": return self._wrap(pyd.to_path)() pydash-8.0.3/src/pydash/chaining/chaining.py000066400000000000000000000171011464745015500210160ustar00rootroot00000000000000""" Method chaining interface. .. versionadded:: 1.0.0 """ import typing as t import pydash as pyd from pydash.exceptions import InvalidMethod from ..helpers import UNSET, Unset from .all_funcs import AllFuncs __all__ = ( "chain", "tap", ) ValueT_co = t.TypeVar("ValueT_co", covariant=True) T = t.TypeVar("T") T2 = t.TypeVar("T2") class Chain(AllFuncs, t.Generic[ValueT_co]): """Enables chaining of :attr:`module` functions.""" #: Object that contains attribute references to available methods. module = pyd invalid_method_exception = InvalidMethod def __init__(self, value: t.Union[ValueT_co, Unset] = UNSET) -> None: self._value = value def _wrap(self, func) -> "ChainWrapper[t.Union[ValueT_co, Unset]]": """Implement `AllFuncs` interface.""" return ChainWrapper(self._value, func) def value(self) -> ValueT_co: """ Return current value of the chain operations. Returns: Current value of chain operations. """ return self(self._value) def to_string(self) -> str: """ Return current value as string. Returns: Current value of chain operations cast to ``str``. """ return self.module.to_string(self.value()) def commit(self) -> "Chain[ValueT_co]": """ Executes the chained sequence and returns the wrapped result. Returns: New instance of :class:`Chain` with resolved value from previous :class:`Chain`. """ return Chain(self.value()) def plant(self, value: t.Any) -> "Chain[ValueT_co]": """ Return a clone of the chained sequence planting `value` as the wrapped value. Args: value: Value to plant as the initial chain value.
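Returns: Clone of the chained sequence with `value` planted as the wrapped value. Example: >>> chain([1, 2, 3, 4]).sum().plant([1, 2]).value() 3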
""" # pylint: disable=no-member,maybe-no-member wrapper = self._value wrappers = [] if hasattr(wrapper, "_value"): wrappers = [wrapper] while isinstance(wrapper._value, ChainWrapper): wrapper = wrapper._value # type: ignore wrappers.insert(0, wrapper) clone: Chain[t.Any] = Chain(value) for wrap in wrappers: clone = ChainWrapper(clone._value, wrap.method)( # type: ignore *wrap.args, # type: ignore **wrap.kwargs, # type: ignore ) return clone def __call__(self, value) -> ValueT_co: """ Return result of passing `value` through chained methods. Args: value: Initial value to pass through chained methods. Returns: Result of method chain evaluation of `value`. """ if isinstance(self._value, ChainWrapper): # pylint: disable=maybe-no-member value = self._value.unwrap(value) return value class ChainWrapper(t.Generic[ValueT_co]): """Wrap :class:`Chain` method call within a :class:`ChainWrapper` context.""" def __init__(self, value: ValueT_co, method) -> None: self._value = value self.method = method self.args = () self.kwargs: t.Dict[t.Any, t.Any] = {} def _generate(self): """Generate a copy of this instance.""" # pylint: disable=attribute-defined-outside-init new = self.__class__.__new__(self.__class__) new.__dict__ = self.__dict__.copy() return new def unwrap(self, value=UNSET): """ Execute :meth:`method` with :attr:`_value`, :attr:`args`, and :attr:`kwargs`. If :attr:`_value` is an instance of :class:`ChainWrapper`, then unwrap it before calling :attr:`method`. """ # Generate a copy of ourself so that we don't modify the chain wrapper # _value directly. This way if we are late passing a value, we don't # "freeze" the chain wrapper value when a value is first passed. # Otherwise, we'd locked the chain wrapper value permanently and not be # able to reuse it. wrapper = self._generate() if isinstance(wrapper._value, ChainWrapper): # pylint: disable=no-member,maybe-no-member wrapper._value = wrapper._value.unwrap(value) elif not isinstance(value, ChainWrapper) and value is not UNSET: # Override wrapper's initial value. wrapper._value = value if wrapper._value is not UNSET: value = wrapper._value return wrapper.method(value, *wrapper.args, **wrapper.kwargs) def __call__(self, *args, **kwargs): """ Invoke the :attr:`method` with :attr:`value` as the first argument and return a new :class:`Chain` object with the return value. Returns: New instance of :class:`Chain` with the results of :attr:`method` passed in as value. """ self.args = args self.kwargs = kwargs return Chain(self) class _Dash(object): """Class that provides attribute access to valid :mod:`pydash` methods and callable access to :mod:`pydash` method chaining.""" def __getattr__(self, attr): """Proxy to :meth:`Chain.get_method`.""" return Chain.get_method(attr) def __call__(self, value: t.Union[ValueT_co, Unset] = UNSET) -> Chain[ValueT_co]: """Return a new instance of :class:`Chain` with `value` as the seed.""" return Chain(value) def chain(value: t.Union[T, Unset] = UNSET) -> Chain[T]: """ Creates a :class:`Chain` object which wraps the given value to enable intuitive method chaining. Chaining is lazy and won't compute a final value until :meth:`Chain.value` is called. Args: value: Value to initialize chain operations with. Returns: Instance of :class:`Chain` initialized with `value`. 
Example: >>> chain([1, 2, 3, 4]).map(lambda x: x * 2).sum().value() 20 >>> chain().map(lambda x: x * 2).sum()([1, 2, 3, 4]) 20 >>> summer = chain([1, 2, 3, 4]).sum() >>> new_summer = summer.plant([1, 2]) >>> new_summer.value() 3 >>> summer.value() 10 >>> def echo(item): ... print(item) >>> summer = chain([1, 2, 3, 4]).for_each(echo).sum() >>> committed = summer.commit() 1 2 3 4 >>> committed.value() 10 >>> summer.value() 1 2 3 4 10 .. versionadded:: 1.0.0 .. versionchanged:: 2.0.0 Made chaining lazy. .. versionchanged:: 3.0.0 - Added support for late passing of `value`. - Added :meth:`Chain.plant` for replacing initial chain value. - Added :meth:`Chain.commit` for returning a new :class:`Chain` instance initialized with the results from calling :meth:`Chain.value`. """ return Chain(value) def tap(value: T, interceptor: t.Callable[[T], t.Any]) -> T: """ Invokes `interceptor` with the `value` as the first argument and then returns `value`. The purpose of this method is to "tap into" a method chain in order to perform operations on intermediate results within the chain. Args: value: Current value of chain operation. interceptor: Function called on `value`. Returns: `value` after `interceptor` call. Example: >>> data = [] >>> def log(value): ... data.append(value) >>> chain([1, 2, 3, 4]).map(lambda x: x * 2).tap(log).value() [2, 4, 6, 8] >>> data [[2, 4, 6, 8]] .. versionadded:: 1.0.0 """ interceptor(value) return value pydash-8.0.3/src/pydash/collections.py000066400000000000000000001524411464745015500200030ustar00rootroot00000000000000""" Functions that operate on lists and dicts. .. versionadded:: 1.0.0 """ from __future__ import annotations from functools import cmp_to_key import random import typing as t import pydash as pyd from .helpers import callit, cmp, getargcount, iterator, iteriteratee from .types import IterateeObjT, PathT __all__ = ( "at", "count_by", "every", "filter_", "find", "find_last", "flat_map", "flat_map_deep", "flat_map_depth", "for_each", "for_each_right", "group_by", "includes", "invoke_map", "key_by", "map_", "nest", "order_by", "partition", "pluck", "reduce_", "reduce_right", "reductions", "reductions_right", "reject", "sample", "sample_size", "shuffle", "size", "some", "sort_by", ) T = t.TypeVar("T") T2 = t.TypeVar("T2") T3 = t.TypeVar("T3") T4 = t.TypeVar("T4") @t.overload def at(collection: t.Mapping[T, T2], *paths: T) -> t.List[t.Union[T2, None]]: ... @t.overload def at(collection: t.Mapping[T, t.Any], *paths: t.Union[T, t.Iterable[T]]) -> t.List[t.Any]: ... @t.overload def at(collection: t.Iterable[T], *paths: int) -> t.List[t.Union[T, None]]: ... @t.overload def at(collection: t.Iterable[t.Any], *paths: t.Union[int, t.Iterable[int]]) -> t.List[t.Any]: ... def at(collection, *paths): """ Creates a list of elements from the specified indexes, or keys, of the collection. Indexes may be specified as individual arguments or as arrays of indexes. Args: collection: Collection to iterate over. *paths: The indexes of `collection` to retrieve, specified as individual indexes or arrays of indexes. Returns: filtered list Example: >>> at([1, 2, 3, 4], 0, 2) [1, 3] >>> at({"a": 1, "b": 2, "c": 3, "d": 4}, "a", "c") [1, 3] >>> at({"a": 1, "b": 2, "c": {"d": {"e": 3}}}, "a", ["c", "d", "e"]) [1, 3] .. versionadded:: 1.0.0 .. versionchanged:: 4.1.0 Support deep path access. """ return pyd.properties(*paths)(collection) @t.overload def count_by(collection: t.Mapping[t.Any, T2], iteratee: None = None) -> t.Dict[T2, int]: ... 
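# The overload groups below mirror the iteratee call styles supported at runtime:
# an iteratee may accept one, two, or three arguments, i.e. ``(value)``,
# ``(value, key)``/``(value, index)``, or ``(value, key, collection)``/
# ``(value, index, collection)``. A minimal sketch (assuming
# ``from pydash import count_by``):
#
#     >>> count_by({"a": 1, "b": 2}, lambda value, key: key.upper())
#     {'A': 1, 'B': 1}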
@t.overload def count_by( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], T3] ) -> t.Dict[T3, int]: ... @t.overload def count_by( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], T3] ) -> t.Dict[T3, int]: ... @t.overload def count_by( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], T3] ) -> t.Dict[T3, int]: ... @t.overload def count_by(collection: t.Iterable[T], iteratee: None = None) -> t.Dict[T, int]: ... @t.overload def count_by( collection: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> t.Dict[T2, int]: ... @t.overload def count_by(collection: t.Iterable[T], iteratee: t.Callable[[T, int], T2]) -> t.Dict[T2, int]: ... @t.overload def count_by(collection: t.Iterable[T], iteratee: t.Callable[[T], T2]) -> t.Dict[T2, int]: ... def count_by(collection, iteratee=None): """ Creates an object composed of keys generated from the results of running each element of `collection` through the iteratee. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. Returns: Dict containing counts by key. Example: >>> results = count_by([1, 2, 1, 2, 3, 4]) >>> assert results == {1: 2, 2: 2, 3: 1, 4: 1} >>> results = count_by(["a", "A", "B", "b"], lambda x: x.lower()) >>> assert results == {"a": 2, "b": 2} >>> results = count_by({"a": 1, "b": 1, "c": 3, "d": 3}) >>> assert results == {1: 2, 3: 2} .. versionadded:: 1.0.0 """ ret = {} for result in iteriteratee(collection, iteratee): ret.setdefault(result[0], 0) ret[result[0]] += 1 return ret def every( collection: t.Iterable[T], predicate: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None ) -> bool: """ Checks if the predicate returns a truthy value for all elements of a collection. The predicate is invoked with three arguments: ``(value, index|key, collection)``. If a property name is passed for predicate, the created :func:`pluck` style predicate will return the property value of the given element. If an object is passed for predicate, the created :func:`.matches` style predicate will return ``True`` for elements that have the properties of the given object, else ``False``. Args: collection: Collection to iterate over. predicate: Predicate applied per iteration. Returns: Whether all elements are truthy. Example: >>> every([1, True, "hello"]) True >>> every([1, False, "hello"]) False >>> every([{"a": 1}, {"a": True}, {"a": "hello"}], "a") True >>> every([{"a": 1}, {"a": False}, {"a": "hello"}], "a") False >>> every([{"a": 1}, {"a": 1}], {"a": 1}) True >>> every([{"a": 1}, {"a": 2}], {"a": 1}) False .. versionadded:: 1.0.0 .. versionchanged: 4.0.0 Removed alias ``all_``. """ if predicate: cbk = pyd.iteratee(predicate) collection = (cbk(item) for item in collection) return all(collection) @t.overload def filter_( collection: t.Mapping[T, T2], predicate: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT, None] = None, ) -> t.List[T2]: ... @t.overload def filter_( collection: t.Mapping[T, T2], predicate: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT, None] = None, ) -> t.List[T2]: ... @t.overload def filter_( collection: t.Mapping[t.Any, T2], predicate: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, ) -> t.List[T2]: ... @t.overload def filter_( collection: t.Iterable[T], predicate: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT, None] = None, ) -> t.List[T]: ... @t.overload def filter_( collection: t.Iterable[T], predicate: t.Union[t.Callable[[T, int], t.Any], IterateeObjT, None] = None, ) -> t.List[T]: ... 
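# A minimal sketch of the default predicate: when ``predicate`` is ``None``,
# ``pyd.iteratee(None)`` should fall back to the identity function, so ``filter_``
# keeps only the truthy elements, e.g. ``filter_([0, 1, None, 2, ""])`` -> ``[1, 2]``.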
@t.overload def filter_( collection: t.Iterable[T], predicate: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> t.List[T]: ... def filter_(collection, predicate=None): """ Iterates over elements of a collection, returning a list of all elements the predicate returns truthy for. Args: collection: Collection to iterate over. predicate: Predicate applied per iteration. Returns: Filtered list. Example: >>> results = filter_([{"a": 1}, {"b": 2}, {"a": 1, "b": 3}], {"a": 1}) >>> assert results == [{"a": 1}, {"a": 1, "b": 3}] >>> filter_([1, 2, 3, 4], lambda x: x >= 3) [3, 4] .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed alias ``select``. """ return [value for is_true, value, _, _ in iteriteratee(collection, predicate) if is_true] @t.overload def find( collection: t.Dict[T, T2], predicate: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT, None] = None, ) -> t.Union[T2, None]: ... @t.overload def find( collection: t.Dict[T, T2], predicate: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT, None] = None, ) -> t.Union[T2, None]: ... @t.overload def find( collection: t.Dict[T, T2], predicate: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, ) -> t.Union[T2, None]: ... @t.overload def find( collection: t.List[T], predicate: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT, None] = None, ) -> t.Union[T, None]: ... @t.overload def find( collection: t.List[T], predicate: t.Union[t.Callable[[T, int], t.Any], IterateeObjT, None] = None, ) -> t.Union[T, None]: ... @t.overload def find( collection: t.List[T], predicate: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> t.Union[T, None]: ... def find(collection, predicate=None): """ Iterates over elements of a collection, returning the first element that the predicate returns truthy for. Args: collection: Collection to iterate over. predicate: Predicate applied per iteration. Returns: First element found or ``None``. Example: >>> find([1, 2, 3, 4], lambda x: x >= 3) 3 >>> find([{"a": 1}, {"b": 2}, {"a": 1, "b": 2}], {"a": 1}) {'a': 1} .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed aliases ``detect`` and ``find_where``. """ search = (value for is_true, value, _, _ in iteriteratee(collection, predicate) if is_true) return next(search, None) @t.overload def find_last( collection: t.Dict[T, T2], predicate: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT, None] = None, ) -> t.Union[T2, None]: ... @t.overload def find_last( collection: t.Dict[T, T2], predicate: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT, None] = None, ) -> t.Union[T2, None]: ... @t.overload def find_last( collection: t.Dict[t.Any, T2], predicate: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, ) -> t.Union[T2, None]: ... @t.overload def find_last( collection: t.List[T], predicate: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT, None] = None, ) -> t.Union[T, None]: ... @t.overload def find_last( collection: t.List[T], predicate: t.Union[t.Callable[[T, int], t.Any], IterateeObjT, None] = None, ) -> t.Union[T, None]: ... @t.overload def find_last( collection: t.List[T], predicate: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> t.Union[T, None]: ... def find_last(collection, predicate=None): """ This method is like :func:`find` except that it iterates over elements of a `collection` from right to left. Args: collection: Collection to iterate over. predicate: Predicate applied per iteration. Returns: Last element found or ``None``. 
Example: >>> find_last([1, 2, 3, 4], lambda x: x >= 3) 4 >>> results = find_last([{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}],\ {'a': 1}) >>> assert results == {'a': 1, 'b': 2} .. versionadded:: 1.0.0 """ search = ( value for is_true, value, _, _ in iteriteratee(collection, predicate, reverse=True) if is_true ) return next(search, None) @t.overload def flat_map( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], t.Iterable[T3]] ) -> t.List[T3]: ... @t.overload def flat_map( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], t.Iterable[T3]] ) -> t.List[T3]: ... @t.overload def flat_map( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], t.Iterable[T3]] ) -> t.List[T3]: ... @t.overload def flat_map( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], T3] ) -> t.List[T3]: ... @t.overload def flat_map(collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], T3]) -> t.List[T3]: ... @t.overload def flat_map(collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], T3]) -> t.List[T3]: ... @t.overload def flat_map(collection: t.Mapping[t.Any, t.Iterable[T2]], iteratee: None = None) -> t.List[T2]: ... @t.overload def flat_map(collection: t.Mapping[t.Any, T2], iteratee: None = None) -> t.List[T2]: ... @t.overload def flat_map( collection: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], t.Iterable[T2]] ) -> t.List[T2]: ... @t.overload def flat_map( collection: t.Iterable[T], iteratee: t.Callable[[T, int], t.Iterable[T2]] ) -> t.List[T2]: ... @t.overload def flat_map( collection: t.Iterable[T], iteratee: t.Callable[[T], t.Iterable[T2]] ) -> t.List[T2]: ... @t.overload def flat_map( collection: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> t.List[T2]: ... @t.overload def flat_map(collection: t.Iterable[T], iteratee: t.Callable[[T, int], T2]) -> t.List[T2]: ... @t.overload def flat_map(collection: t.Iterable[T], iteratee: t.Callable[[T], T2]) -> t.List[T2]: ... @t.overload def flat_map(collection: t.Iterable[t.Iterable[T]], iteratee: None = None) -> t.List[T]: ... @t.overload def flat_map(collection: t.Iterable[T], iteratee: None = None) -> t.List[T]: ... def flat_map(collection, iteratee=None): """ Creates a flattened list of values by running each element in collection through `iteratee` and flattening the mapped results. The `iteratee` is invoked with three arguments: ``(value, index|key, collection)``. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. Returns: Flattened mapped list. Example: >>> duplicate = lambda n: [[n, n]] >>> flat_map([1, 2], duplicate) [[1, 1], [2, 2]] .. versionadded:: 4.0.0 """ return pyd.flatten(itermap(collection, iteratee=iteratee)) @t.overload def flat_map_deep( collection: t.Mapping[T, T2], iteratee: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], None] = None, ) -> t.List[t.Any]: ... @t.overload def flat_map_deep( collection: t.Mapping[T, T2], iteratee: t.Union[t.Callable[[T2, T], t.Any], None] = None ) -> t.List[t.Any]: ... @t.overload def flat_map_deep( collection: t.Mapping[t.Any, T2], iteratee: t.Union[t.Callable[[T2], t.Any], None] = None ) -> t.List[t.Any]: ... @t.overload def flat_map_deep( collection: t.Iterable[T], iteratee: t.Union[t.Callable[[T, int, t.List[T]], t.Any], None] = None, ) -> t.List[t.Any]: ... @t.overload def flat_map_deep( collection: t.Iterable[T], iteratee: t.Union[t.Callable[[T, int], t.Any], None] = None ) -> t.List[t.Any]: ... 
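# The three ``flat_map*`` variants differ only in how the mapped results are
# flattened: ``flat_map`` flattens one level, ``flat_map_deep`` flattens
# recursively, and ``flat_map_depth`` flattens up to ``depth`` levels. With
# ``duplicate = lambda n: [[n, n]]`` (examples mirror the docstrings below):
#
#     >>> flat_map([1, 2], duplicate)
#     [[1, 1], [2, 2]]
#     >>> flat_map_deep([1, 2], duplicate)
#     [1, 1, 2, 2]
#     >>> flat_map_depth([1, 2], duplicate, 2)
#     [1, 1, 2, 2]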
@t.overload def flat_map_deep( collection: t.Iterable[T], iteratee: t.Union[t.Callable[[T], t.Any], None] = None ) -> t.List[t.Any]: ... def flat_map_deep(collection, iteratee=None): """ This method is like :func:`flat_map` except that it recursively flattens the mapped results. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. Returns: Flattened mapped list. Example: >>> duplicate = lambda n: [[n, n]] >>> flat_map_deep([1, 2], duplicate) [1, 1, 2, 2] .. versionadded:: 4.0.0 """ return pyd.flatten_deep(itermap(collection, iteratee=iteratee)) @t.overload def flat_map_depth( collection: t.Mapping[T, T2], iteratee: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], None] = None, depth: int = 1, ) -> t.List[t.Any]: ... @t.overload def flat_map_depth( collection: t.Mapping[T, T2], iteratee: t.Union[t.Callable[[T2, T], t.Any], None] = None, depth: int = 1, ) -> t.List[t.Any]: ... @t.overload def flat_map_depth( collection: t.Mapping[t.Any, T2], iteratee: t.Union[t.Callable[[T2], t.Any], None] = None, depth: int = 1, ) -> t.List[t.Any]: ... @t.overload def flat_map_depth( collection: t.Iterable[T], iteratee: t.Union[t.Callable[[T, int, t.List[T]], t.Any], None] = None, depth: int = 1, ) -> t.List[t.Any]: ... @t.overload def flat_map_depth( collection: t.Iterable[T], iteratee: t.Union[t.Callable[[T, int], t.Any], None] = None, depth: int = 1, ) -> t.List[t.Any]: ... @t.overload def flat_map_depth( collection: t.Iterable[T], iteratee: t.Union[t.Callable[[T], t.Any], None] = None, depth: int = 1, ) -> t.List[t.Any]: ... def flat_map_depth(collection, iteratee=None, depth=1): """ This method is like :func:`flat_map` except that it recursively flattens the mapped results up to `depth` times. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. Returns: Flattened mapped list. Example: >>> duplicate = lambda n: [[n, n]] >>> flat_map_depth([1, 2], duplicate, 1) [[1, 1], [2, 2]] >>> flat_map_depth([1, 2], duplicate, 2) [1, 1, 2, 2] .. versionadded:: 4.0.0 """ return pyd.flatten_depth(itermap(collection, iteratee=iteratee), depth=depth) @t.overload def for_each( collection: t.Dict[T, T2], iteratee: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT, None] = None, ) -> t.Dict[T, T2]: ... @t.overload def for_each( collection: t.Dict[T, T2], iteratee: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT, None] = None, ) -> t.Dict[T, T2]: ... @t.overload def for_each( collection: t.Dict[T, T2], iteratee: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, ) -> t.Dict[T, T2]: ... @t.overload def for_each( collection: t.List[T], iteratee: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT, None] = None, ) -> t.List[T]: ... @t.overload def for_each( collection: t.List[T], iteratee: t.Union[t.Callable[[T, int], t.Any], IterateeObjT, None] = None, ) -> t.List[T]: ... @t.overload def for_each( collection: t.List[T], iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> t.List[T]: ... def for_each(collection, iteratee=None): """ Iterates over elements of a collection, executing the iteratee for each element. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. Returns: `collection` Example: >>> results = {} >>> def cb(x): ... results[x] = x**2 >>> for_each([1, 2, 3, 4], cb) [1, 2, 3, 4] >>> assert results == {1: 1, 2: 4, 3: 9, 4: 16} .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed alias ``each``. 
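    Note:
        Iteration exits early if the iteratee explicitly returns ``False``; any
        other return value, including ``None``, continues the loop (see the
        ``ret is False`` check in the implementation below).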
""" next((None for ret, _, _, _ in iteriteratee(collection, iteratee) if ret is False), None) return collection @t.overload def for_each_right( collection: t.Dict[T, T2], iteratee: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT], ) -> t.Dict[T, T2]: ... @t.overload def for_each_right( collection: t.Dict[T, T2], iteratee: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT], ) -> t.Dict[T, T2]: ... @t.overload def for_each_right( collection: t.Dict[T, T2], iteratee: t.Union[t.Callable[[T2], t.Any], IterateeObjT], ) -> t.Dict[T, T2]: ... @t.overload def for_each_right( collection: t.List[T], iteratee: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT], ) -> t.List[T]: ... @t.overload def for_each_right( collection: t.List[T], iteratee: t.Union[t.Callable[[T, int], t.Any], IterateeObjT], ) -> t.List[T]: ... @t.overload def for_each_right( collection: t.List[T], iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT], ) -> t.List[T]: ... def for_each_right(collection, iteratee): """ This method is like :func:`for_each` except that it iterates over elements of a `collection` from right to left. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. Returns: `collection` Example: >>> results = {"total": 1} >>> def cb(x): ... results["total"] = x * results["total"] >>> for_each_right([1, 2, 3, 4], cb) [1, 2, 3, 4] >>> assert results == {"total": 24} .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed alias ``each_right``. """ next( (None for ret, _, _, _ in iteriteratee(collection, iteratee, reverse=True) if ret is False), None, ) return collection @t.overload def group_by(collection: t.Iterable[T], iteratee: t.Callable[[T], T2]) -> t.Dict[T2, t.List[T]]: ... @t.overload def group_by( collection: t.Iterable[T], iteratee: t.Union[IterateeObjT, None] = None ) -> t.Dict[t.Any, t.List[T]]: ... def group_by(collection, iteratee=None): """ Creates an object composed of keys generated from the results of running each element of a `collection` through the iteratee. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. Returns: Results of grouping by `iteratee`. Example: >>> results = group_by([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], 'a') >>> assert results == {1: [{'a': 1, 'b': 2}], 3: [{'a': 3, 'b': 4}]} >>> results = group_by([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], {'a': 1}) >>> assert results == {False: [{'a': 3, 'b': 4}],\ True: [{'a': 1, 'b': 2}]} .. versionadded:: 1.0.0 """ ret = {} cbk = pyd.iteratee(iteratee) for value in collection: key = cbk(value) ret.setdefault(key, []) ret[key].append(value) return ret def includes( collection: t.Union[t.Sequence[t.Any], t.Dict[t.Any, t.Any]], target: t.Any, from_index: int = 0 ) -> bool: """ Checks if a given value is present in a collection. If `from_index` is negative, it is used as the offset from the end of the collection. Args: collection: Collection to iterate over. target: Target value to compare to. from_index: Offset to start search from. Returns: Whether `target` is in `collection`. Example: >>> includes([1, 2, 3, 4], 2) True >>> includes([1, 2, 3, 4], 2, from_index=2) False >>> includes({"a": 1, "b": 2, "c": 3, "d": 4}, 2) True .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Renamed from ``contains`` to ``includes`` and removed alias ``include``. 
""" collection_values: t.Container[t.Any] if isinstance(collection, dict): collection_values = collection.values() else: # only makes sense to do this if `collection` is not a dict collection_values = collection[from_index:] return target in collection_values def invoke_map( collection: t.Iterable[t.Any], path: PathT, *args: t.Any, **kwargs: t.Any ) -> t.List[t.Any]: """ Invokes the method at `path` of each element in `collection`, returning a list of the results of each invoked method. Any additional arguments are provided to each invoked method. If `path` is a function, it's invoked for each element in `collection`. Args: collection: Collection to iterate over. path: String path to method to invoke or callable to invoke for each element in `collection`. args: Arguments to pass to method call. kwargs: Keyword arguments to pass to method call. Returns: List of results of invoking method of each item. Example: >>> items = [{"a": [{"b": 1}]}, {"a": [{"c": 2}]}] >>> expected = [{"b": 1}.items(), {"c": 2}.items()] >>> invoke_map(items, "a[0].items") == expected True .. versionadded:: 4.0.0 """ return map_(collection, lambda item: pyd.invoke(item, path, *args, **kwargs)) @t.overload def key_by(collection: t.Iterable[T], iteratee: t.Callable[[T], T2]) -> t.Dict[T2, T]: ... @t.overload def key_by( collection: t.Iterable[t.Any], iteratee: t.Union[IterateeObjT, None] = None ) -> t.Dict[t.Any, t.Any]: ... def key_by(collection, iteratee=None): """ Creates an object composed of keys generated from the results of running each element of the collection through the given iteratee. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. Returns: Results of indexing by `iteratee`. Example: >>> results = key_by([{"a": 1, "b": 2}, {"a": 3, "b": 4}], "a") >>> assert results == {1: {"a": 1, "b": 2}, 3: {"a": 3, "b": 4}} .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Renamed from ``index_by`` to ``key_by``. """ ret = {} cbk = pyd.iteratee(iteratee) for value in collection: ret[cbk(value)] = value return ret @t.overload def map_(collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], T3]) -> t.List[T3]: ... @t.overload def map_(collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], T3]) -> t.List[T3]: ... @t.overload def map_( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], T3] ) -> t.List[T3]: ... @t.overload def map_(collection: t.Iterable[T], iteratee: t.Callable[[T], T2]) -> t.List[T2]: ... @t.overload def map_(collection: t.Iterable[T], iteratee: t.Callable[[T, int], T2]) -> t.List[T2]: ... @t.overload def map_( collection: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> t.List[T2]: ... @t.overload def map_( collection: t.Iterable[t.Any], iteratee: t.Union[IterateeObjT, None] = None ) -> t.List[t.Any]: ... def map_(collection, iteratee=None): """ Creates an array of values by running each element in the collection through the iteratee. The iteratee is invoked with three arguments: ``(value, index|key, collection)``. If a property name is passed for iteratee, the created :func:`pluck` style iteratee will return the property value of the given element. If an object is passed for iteratee, the created :func:`.matches` style iteratee will return ``True`` for elements that have the properties of the given object, else ``False``. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. Returns: Mapped list. 
Example: >>> map_([1, 2, 3, 4], str) ['1', '2', '3', '4'] >>> map_([{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}], "a") [1, 3, 5] >>> map_([[[0, 1]], [[2, 3]], [[4, 5]]], "0.1") [1, 3, 5] >>> map_([{"a": {"b": 1}}, {"a": {"b": 2}}], "a.b") [1, 2] >>> map_([{"a": {"b": [0, 1]}}, {"a": {"b": [2, 3]}}], "a.b[1]") [1, 3] .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed alias ``collect``. """ return list(itermap(collection, iteratee)) def nest(collection: t.Iterable[t.Any], *properties: t.Any) -> t.Any: """ This method is like :func:`group_by` except that it supports nested grouping by multiple string `properties`. If only a single key is given, it is like calling ``group_by(collection, prop)``. Args: collection: Collection to iterate over. *properties: Properties to nest by. Returns: Results of nested grouping by `properties`. Example: >>> results = nest([{'shape': 'square', 'color': 'red', 'qty': 5},\ {'shape': 'square', 'color': 'blue', 'qty': 10},\ {'shape': 'square', 'color': 'orange', 'qty': 5},\ {'shape': 'circle', 'color': 'yellow', 'qty': 5},\ {'shape': 'circle', 'color': 'pink', 'qty': 10},\ {'shape': 'oval', 'color': 'purple', 'qty': 5}],\ 'shape', 'qty') >>> expected = {\ 'square': {5: [{'shape': 'square', 'color': 'red', 'qty': 5},\ {'shape': 'square', 'color': 'orange', 'qty': 5}],\ 10: [{'shape': 'square', 'color': 'blue', 'qty': 10}]},\ 'circle': {5: [{'shape': 'circle', 'color': 'yellow', 'qty': 5}],\ 10: [{'shape': 'circle', 'color': 'pink', 'qty': 10}]},\ 'oval': {5: [{'shape': 'oval', 'color': 'purple', 'qty': 5}]}} >>> results == expected True .. versionadded:: 4.3.0 """ if not properties: return collection flat_properties = pyd.flatten(properties) first, rest = flat_properties[0], flat_properties[1:] return pyd.map_values(group_by(collection, first), lambda value: nest(value, *rest)) @t.overload def order_by( collection: t.Mapping[t.Any, T2], keys: t.Iterable[t.Union[str, int]], orders: t.Union[t.Iterable[bool], bool], reverse: bool = False, ) -> t.List[T2]: ... @t.overload def order_by( collection: t.Mapping[t.Any, T2], keys: t.Iterable[str], orders: None = None, reverse: bool = False, ) -> t.List[T2]: ... @t.overload def order_by( collection: t.Iterable[T], keys: t.Iterable[t.Union[str, int]], orders: t.Union[t.Iterable[bool], bool], reverse: bool = False, ) -> t.List[T]: ... @t.overload def order_by( collection: t.Iterable[T], keys: t.Iterable[str], orders: None = None, reverse: bool = False, ) -> t.List[T]: ... def order_by(collection, keys, orders=None, reverse=False): """ This method is like :func:`sort_by` except that it sorts by key names instead of an iteratee function. Keys can be sorted in descending order by prepending a ``"-"`` to the key name (e.g. ``"name"`` would become ``"-name"``) or by passing a list of boolean sort options via `orders` where ``True`` is ascending and ``False`` is descending. Args: collection: Collection to iterate over. keys: List of keys to sort by. By default, keys will be sorted in ascending order. To sort a key in descending order, prepend a ``"-"`` to the key name. For example, to sort the key value for ``"name"`` in descending order, use ``"-name"``. orders: List of boolean sort orders to apply for each key. ``True`` corresponds to ascending order while ``False`` is descending. Defaults to ``None``. reverse (bool, optional): Whether to reverse the sort. Defaults to ``False``. Returns: Sorted list. 
Example: >>> items = [{'a': 2, 'b': 1}, {'a': 3, 'b': 2}, {'a': 1, 'b': 3}] >>> results = order_by(items, ['b', 'a']) >>> assert results == [{'a': 2, 'b': 1},\ {'a': 3, 'b': 2},\ {'a': 1, 'b': 3}] >>> results = order_by(items, ['a', 'b']) >>> assert results == [{'a': 1, 'b': 3},\ {'a': 2, 'b': 1},\ {'a': 3, 'b': 2}] >>> results = order_by(items, ['-a', 'b']) >>> assert results == [{'a': 3, 'b': 2},\ {'a': 2, 'b': 1},\ {'a': 1, 'b': 3}] >>> results = order_by(items, ['a', 'b'], [False, True]) >>> assert results == [{'a': 3, 'b': 2},\ {'a': 2, 'b': 1},\ {'a': 1, 'b': 3}] .. versionadded:: 3.0.0 .. versionchanged:: 3.2.0 Added `orders` argument. .. versionchanged:: 3.2.0 Added :func:`sort_by_order` as alias. .. versionchanged:: 4.0.0 Renamed from ``order_by`` to ``order_by`` and removed alias ``sort_by_order``. """ if isinstance(collection, dict): collection = collection.values() # Maintain backwards compatibility. if pyd.is_boolean(orders): reverse = orders orders = None comparers = [] if orders: for i, key in enumerate(keys): if pyd.has(orders, i): order = 1 if orders[i] else -1 else: order = 1 comparers.append((pyd.property_(key), order)) else: for key in keys: if key.startswith("-"): order = -1 key = key[1:] else: order = 1 comparers.append((pyd.property_(key), order)) def comparison(left, right): # pylint: disable=useless-else-on-loop,missing-docstring for func, mult in comparers: result = cmp(func(left), func(right)) if result: return mult * result return 0 return sorted(collection, key=cmp_to_key(comparison), reverse=reverse) @t.overload def partition( collection: t.Mapping[T, T2], predicate: t.Callable[[T2, T, t.Dict[T, T2]], t.Any] ) -> t.List[t.List[T2]]: ... @t.overload def partition( collection: t.Mapping[T, T2], predicate: t.Callable[[T2, T], t.Any] ) -> t.List[t.List[T2]]: ... @t.overload def partition( collection: t.Mapping[t.Any, T2], predicate: t.Callable[[T2], t.Any] ) -> t.List[t.List[T2]]: ... @t.overload def partition( collection: t.Mapping[t.Any, T2], predicate: t.Union[IterateeObjT, None] = None ) -> t.List[t.List[T2]]: ... @t.overload def partition( collection: t.Iterable[T], predicate: t.Callable[[T, int, t.List[T]], t.Any] ) -> t.List[t.List[T]]: ... @t.overload def partition( collection: t.Iterable[T], predicate: t.Callable[[T, int], t.Any] ) -> t.List[t.List[T]]: ... @t.overload def partition( collection: t.Iterable[T], predicate: t.Callable[[T], t.Any] ) -> t.List[t.List[T]]: ... @t.overload def partition( collection: t.Iterable[T], predicate: t.Union[IterateeObjT, None] = None ) -> t.List[t.List[T]]: ... def partition(collection, predicate=None): """ Creates an array of elements split into two groups, the first of which contains elements the `predicate` returns truthy for, while the second of which contains elements the `predicate` returns falsey for. The `predicate` is invoked with three arguments: ``(value, index|key, collection)``. If a property name is provided for `predicate` the created :func:`pluck` style predicate returns the property value of the given element. If an object is provided for `predicate` the created :func:`.matches` style predicate returns ``True`` for elements that have the properties of the given object, else ``False``. Args: collection: Collection to iterate over. predicate: Predicate applied per iteration. Returns: List of grouped elements. Example: >>> partition([1, 2, 3, 4], lambda x: x >= 3) [[3, 4], [1, 2]] .. 
versionadded:: 1.1.0 """ trues = [] falses = [] for is_true, value, _, _ in iteriteratee(collection, predicate): if is_true: trues.append(value) else: falses.append(value) return [trues, falses] def pluck(collection: t.Iterable[t.Any], path: PathT) -> t.List[t.Any]: """ Retrieves the value of a specified property from all elements in the collection. Args: collection: List of dicts. path: Collection's path to pluck Returns: Plucked list. Example: >>> pluck([{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}], "a") [1, 3, 5] >>> pluck([[[0, 1]], [[2, 3]], [[4, 5]]], "0.1") [1, 3, 5] >>> pluck([{"a": {"b": 1}}, {"a": {"b": 2}}], "a.b") [1, 2] >>> pluck([{"a": {"b": [0, 1]}}, {"a": {"b": [2, 3]}}], "a.b.1") [1, 3] >>> pluck([{"a": {"b": [0, 1]}}, {"a": {"b": [2, 3]}}], ["a", "b", 1]) [1, 3] .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Function removed. .. versionchanged:: 4.0.1 Made property access deep. """ return map_(collection, pyd.property_(path)) @t.overload def reduce_( collection: t.Mapping[T, T2], iteratee: t.Callable[[T3, T2, T], T3], accumulator: T3, ) -> T3: ... @t.overload def reduce_( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T3, T2], T3], accumulator: T3, ) -> T3: ... @t.overload def reduce_( collection: t.Mapping[t.Any, t.Any], iteratee: t.Callable[[T3], T3], accumulator: T3, ) -> T3: ... @t.overload def reduce_( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T2, T], T2], accumulator: None = None, ) -> T2: ... @t.overload def reduce_( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2, T2], T2], accumulator: None = None, ) -> T2: ... @t.overload def reduce_( collection: t.Mapping[t.Any, t.Any], iteratee: t.Callable[[T], T], accumulator: None = None, ) -> T: ... @t.overload def reduce_( collection: t.Iterable[T], iteratee: t.Callable[[T2, T, int], T2], accumulator: T2, ) -> T2: ... @t.overload def reduce_( collection: t.Iterable[T], iteratee: t.Callable[[T2, T], T2], accumulator: T2, ) -> T2: ... @t.overload def reduce_( collection: t.Iterable[t.Any], iteratee: t.Callable[[T2], T2], accumulator: T2, ) -> T2: ... @t.overload def reduce_( collection: t.Iterable[T], iteratee: t.Callable[[T, T, int], T], accumulator: None = None, ) -> T: ... @t.overload def reduce_( collection: t.Iterable[T], iteratee: t.Callable[[T, T], T], accumulator: None = None, ) -> T: ... @t.overload def reduce_( collection: t.Iterable[t.Any], iteratee: t.Callable[[T], T], accumulator: None = None, ) -> T: ... @t.overload def reduce_( collection: t.Iterable[T], iteratee: None = None, accumulator: t.Union[T, None] = None ) -> T: ... def reduce_(collection, iteratee=None, accumulator=None): """ Reduces a collection to a value which is the accumulated result of running each element in the collection through the iteratee, where each successive iteratee execution consumes the return value of the previous execution. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. accumulator: Initial value of aggregator. Default is to use the result of the first iteration. Returns: Accumulator object containing results of reduction. Example: >>> reduce_([1, 2, 3, 4], lambda total, x: total * x) 24 .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed aliases ``foldl`` and ``inject``. 
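    Note:
        A supplied ``accumulator`` seeds the reduction instead of the first
        element, e.g. ``reduce_([1, 2, 3, 4], lambda total, x: total * x, 10)``
        evaluates to ``240``; reducing an empty sequence without an accumulator
        raises a ``TypeError`` (see the implementation below).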
""" iterable = iterator(collection) if accumulator is None: try: _, accumulator = next(iterable) except StopIteration as exc: raise TypeError("reduce_() of empty sequence with no initial value") from exc result = accumulator if iteratee is None: iteratee = pyd.identity argcount = 1 else: argcount = getargcount(iteratee, maxargs=3) for index, item in iterable: result = callit(iteratee, result, item, index, argcount=argcount) return result @t.overload def reduce_right( collection: t.Mapping[T, T2], iteratee: t.Callable[[T3, T2, T], T3], accumulator: T3, ) -> T3: ... @t.overload def reduce_right( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T3, T2], T3], accumulator: T3, ) -> T3: ... @t.overload def reduce_right( collection: t.Mapping[t.Any, t.Any], iteratee: t.Callable[[T3], T3], accumulator: T3, ) -> T3: ... @t.overload def reduce_right( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T2, T], T2], accumulator: None = None, ) -> T2: ... @t.overload def reduce_right( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2, T2], T2], accumulator: None = None, ) -> T2: ... @t.overload def reduce_right( collection: t.Mapping[t.Any, t.Any], iteratee: t.Callable[[T], T], accumulator: None = None, ) -> T: ... @t.overload def reduce_right( collection: t.Iterable[T], iteratee: t.Callable[[T2, T, int], T2], accumulator: T2, ) -> T2: ... @t.overload def reduce_right( collection: t.Iterable[T], iteratee: t.Callable[[T2, T], T2], accumulator: T2, ) -> T2: ... @t.overload def reduce_right( collection: t.Iterable[t.Any], iteratee: t.Callable[[T2], T2], accumulator: T2, ) -> T2: ... @t.overload def reduce_right( collection: t.Iterable[T], iteratee: t.Callable[[T, T, int], T], accumulator: None = None, ) -> T: ... @t.overload def reduce_right( collection: t.Iterable[T], iteratee: t.Callable[[T, T], T], accumulator: None = None, ) -> T: ... @t.overload def reduce_right( collection: t.Iterable[t.Any], iteratee: t.Callable[[T], T], accumulator: None = None, ) -> T: ... @t.overload def reduce_right( collection: t.Iterable[T], iteratee: None = None, accumulator: t.Union[T, None] = None ) -> T: ... def reduce_right(collection, iteratee=None, accumulator=None): """ This method is like :func:`reduce_` except that it iterates over elements of a `collection` from right to left. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. accumulator: Initial value of aggregator. Default is to use the result of the first iteration. Returns: Accumulator object containing results of reduction. Example: >>> reduce_right([1, 2, 3, 4], lambda total, x: total**x) 4096 .. versionadded:: 1.0.0 .. versionchanged:: 3.2.1 Fix bug where collection was not reversed correctly. .. versionchanged:: 4.0.0 Removed alias ``foldr``. """ if not isinstance(collection, dict): collection = list(collection)[::-1] return reduce_(collection, iteratee, accumulator) @t.overload def reductions( collection: t.Mapping[T, T2], iteratee: t.Callable[[T3, T2, T], T3], accumulator: T3, from_right: bool = False, ) -> t.List[T3]: ... @t.overload def reductions( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T3, T2], T3], accumulator: T3, from_right: bool = False, ) -> t.List[T3]: ... @t.overload def reductions( collection: t.Mapping[t.Any, t.Any], iteratee: t.Callable[[T3], T3], accumulator: T3, from_right: bool = False, ) -> t.List[T3]: ... 
@t.overload def reductions( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T2, T], T2], accumulator: None = None, from_right: bool = False, ) -> t.List[T2]: ... @t.overload def reductions( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2, T2], T2], accumulator: None = None, from_right: bool = False, ) -> t.List[T2]: ... @t.overload def reductions( collection: t.Mapping[t.Any, t.Any], iteratee: t.Callable[[T], T], accumulator: None = None, from_right: bool = False, ) -> t.List[T]: ... @t.overload def reductions( collection: t.Iterable[T], iteratee: t.Callable[[T2, T, int], T2], accumulator: T2, from_right: bool = False, ) -> t.List[T2]: ... @t.overload def reductions( collection: t.Iterable[T], iteratee: t.Callable[[T2, T], T2], accumulator: T2, from_right: bool = False, ) -> t.List[T2]: ... @t.overload def reductions( collection: t.Iterable[t.Any], iteratee: t.Callable[[T2], T2], accumulator: T2, from_right: bool = False, ) -> t.List[T2]: ... @t.overload def reductions( collection: t.Iterable[T], iteratee: t.Callable[[T, T, int], T], accumulator: None = None, from_right: bool = False, ) -> t.List[T]: ... @t.overload def reductions( collection: t.Iterable[T], iteratee: t.Callable[[T, T], T], accumulator: None = None, from_right: bool = False, ) -> t.List[T]: ... @t.overload def reductions( collection: t.Iterable[t.Any], iteratee: t.Callable[[T], T], accumulator: None = None, from_right: bool = False, ) -> t.List[T]: ... @t.overload def reductions( collection: t.Iterable[T], iteratee: None = None, accumulator: t.Union[T, None] = None, from_right: bool = False, ) -> t.List[T]: ... def reductions(collection, iteratee=None, accumulator=None, from_right=False): """ This function is like :func:`reduce_` except that it returns a list of every intermediate value in the reduction operation. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. accumulator: Initial value of aggregator. Default is to use the result of the first iteration. Returns: Results of each reduction operation. Example: >>> reductions([1, 2, 3, 4], lambda total, x: total * x) [2, 6, 24] Note: The last element of the returned list would be the result of using :func:`reduce_`. .. versionadded:: 2.0.0 """ if iteratee is None: iteratee = pyd.identity argcount = 1 else: argcount = getargcount(iteratee, maxargs=3) results = [] def interceptor(result, item, index): result = callit(iteratee, result, item, index, argcount=argcount) results.append(result) return result reducer = reduce_right if from_right else reduce_ reducer(collection, interceptor, accumulator) return results @t.overload def reductions_right( collection: t.Mapping[T, T2], iteratee: t.Callable[[T3, T2, T], T3], accumulator: T3, ) -> t.List[T3]: ... @t.overload def reductions_right( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T3, T2], T3], accumulator: T3, ) -> t.List[T3]: ... @t.overload def reductions_right( collection: t.Mapping[t.Any, t.Any], iteratee: t.Callable[[T3], T3], accumulator: T3, ) -> t.List[T3]: ... @t.overload def reductions_right( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T2, T], T2], accumulator: None = None, ) -> t.List[T2]: ... @t.overload def reductions_right( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2, T2], T2], accumulator: None = None, ) -> t.List[T2]: ... @t.overload def reductions_right( collection: t.Mapping[t.Any, t.Any], iteratee: t.Callable[[T], T], accumulator: None = None, ) -> t.List[T]: ... 
@t.overload def reductions_right( collection: t.Iterable[T], iteratee: t.Callable[[T2, T, int], T2], accumulator: T2, ) -> t.List[T2]: ... @t.overload def reductions_right( collection: t.Iterable[T], iteratee: t.Callable[[T2, T], T2], accumulator: T2, ) -> t.List[T2]: ... @t.overload def reductions_right( collection: t.Iterable[t.Any], iteratee: t.Callable[[T2], T2], accumulator: T2, ) -> t.List[T2]: ... @t.overload def reductions_right( collection: t.Iterable[T], iteratee: t.Callable[[T, T, int], T], accumulator: None = None, ) -> t.List[T]: ... @t.overload def reductions_right( collection: t.Iterable[T], iteratee: t.Callable[[T, T], T], accumulator: None = None, ) -> t.List[T]: ... @t.overload def reductions_right( collection: t.Iterable[t.Any], iteratee: t.Callable[[T], T], accumulator: None = None, ) -> t.List[T]: ... @t.overload def reductions_right( collection: t.Iterable[T], iteratee: None = None, accumulator: t.Union[T, None] = None ) -> t.List[T]: ... def reductions_right(collection, iteratee=None, accumulator=None): """ This method is like :func:`reductions` except that it iterates over elements of a `collection` from right to left. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. accumulator: Initial value of aggregator. Default is to use the result of the first iteration. Returns: Results of each reduction operation. Example: >>> reductions_right([1, 2, 3, 4], lambda total, x: total**x) [64, 4096, 4096] Note: The last element of the returned list would be the result of using :func:`reduce_`. .. versionadded:: 2.0.0 """ return reductions(collection, iteratee, accumulator, from_right=True) @t.overload def reject( collection: t.Mapping[T, T2], predicate: t.Union[t.Callable[[T2, T, t.Dict[T, T2]], t.Any], IterateeObjT, None] = None, ) -> t.List[T2]: ... @t.overload def reject( collection: t.Mapping[T, T2], predicate: t.Union[t.Callable[[T2, T], t.Any], IterateeObjT, None] = None, ) -> t.List[T2]: ... @t.overload def reject( collection: t.Mapping[t.Any, T2], predicate: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, ) -> t.List[T2]: ... @t.overload def reject( collection: t.Iterable[T], predicate: t.Union[t.Callable[[T, int, t.List[T]], t.Any], IterateeObjT, None] = None, ) -> t.List[T]: ... @t.overload def reject( collection: t.Iterable[T], predicate: t.Union[t.Callable[[T, int], t.Any], IterateeObjT, None] = None, ) -> t.List[T]: ... @t.overload def reject( collection: t.Iterable[T], predicate: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, ) -> t.List[T]: ... def reject(collection, predicate=None): """ The opposite of :func:`filter_` this method returns the elements of a collection that the predicate does **not** return truthy for. Args: collection: Collection to iterate over. predicate: Predicate applied per iteration. Returns: Rejected elements of `collection`. Example: >>> reject([1, 2, 3, 4], lambda x: x >= 3) [1, 2] >>> reject([{"a": 0}, {"a": 1}, {"a": 2}], "a") [{'a': 0}] >>> reject([{"a": 0}, {"a": 1}, {"a": 2}], {"a": 1}) [{'a': 0}, {'a': 2}] .. versionadded:: 1.0.0 """ return [value for is_true, value, _, _ in iteriteratee(collection, predicate) if not is_true] def sample(collection: t.Sequence[T]) -> T: """ Retrieves a random element from a given `collection`. Args: collection: Collection to iterate over. Returns: Random element from the given collection. Example: >>> items = [1, 2, 3, 4, 5] >>> results = sample(items) >>> assert results in items .. versionadded:: 1.0.0 .. 
versionchanged:: 4.0.0 Moved multiple samples functionality to :func:`sample_size`. This function now only returns a single random sample. """ return random.choice(collection) def sample_size(collection: t.Sequence[T], n: t.Union[int, None] = None) -> t.List[T]: """ Retrieves list of `n` random elements from a collection. Args: collection: Collection to iterate over. n: Number of random samples to return. Returns: List of `n` sampled collection values. Examples: >>> items = [1, 2, 3, 4, 5] >>> results = sample_size(items, 2) >>> assert len(results) == 2 >>> assert set(items).intersection(results) == set(results) .. versionadded:: 4.0.0 """ num = min(n or 1, len(collection)) return random.sample(collection, num) @t.overload def shuffle(collection: t.Mapping[t.Any, T]) -> t.List[T]: ... @t.overload def shuffle(collection: t.Iterable[T]) -> t.List[T]: ... def shuffle(collection): """ Creates a list of shuffled values, using a version of the Fisher-Yates shuffle. Args: collection: Collection to iterate over. Returns: Shuffled list of values. Example: >>> items = [1, 2, 3, 4] >>> results = shuffle(items) >>> assert len(results) == len(items) >>> assert set(results) == set(items) .. versionadded:: 1.0.0 """ if isinstance(collection, dict): collection = collection.values() # Make copy of collection since random.shuffle works on list in-place. collection = list(collection) # NOTE: random.shuffle uses Fisher-Yates. random.shuffle(collection) return collection def size(collection: t.Sized) -> int: """ Gets the size of the `collection` by returning `len(collection)` for iterable objects. Args: collection: Collection to iterate over. Returns: Collection length. Example: >>> size([1, 2, 3, 4]) 4 .. versionadded:: 1.0.0 """ return len(collection) def some( collection: t.Iterable[T], predicate: t.Union[t.Callable[[T], t.Any], None] = None ) -> bool: """ Checks if the predicate returns a truthy value for any element of a collection. The predicate is invoked with three arguments: ``(value, index|key, collection)``. If a property name is passed for predicate, the created :func:`map_` style predicate will return the property value of the given element. If an object is passed for predicate, the created :func:`.matches` style predicate will return ``True`` for elements that have the properties of the given object, else ``False``. Args: collection: Collection to iterate over. predicate: Predicate applied per iteration. Returns: Whether any of the elements are truthy. Example: >>> some([False, True, 0]) True >>> some([False, 0, None]) False >>> some([1, 2, 3, 4], lambda x: x >= 3) True >>> some([1, 2, 3, 4], lambda x: x == 0) False .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed alias ``any_``. """ if predicate: cbk = pyd.iteratee(predicate) collection = (cbk(item) for item in collection) return any(collection) @t.overload def sort_by( collection: t.Mapping[t.Any, T2], iteratee: t.Union[t.Callable[[T2], t.Any], IterateeObjT, None] = None, reverse: bool = False, ) -> t.List[T2]: ... @t.overload def sort_by( collection: t.Iterable[T], iteratee: t.Union[t.Callable[[T], t.Any], IterateeObjT, None] = None, reverse: bool = False, ) -> t.List[T]: ... def sort_by(collection, iteratee=None, reverse=False): """ Creates a list of elements, sorted in ascending order by the results of running each element in a `collection` through the iteratee. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. reverse: Whether to reverse the sort. Defaults to ``False``. Returns: Sorted list. 
Example: >>> sort_by({"a": 2, "b": 3, "c": 1}) [1, 2, 3] >>> sort_by({"a": 2, "b": 3, "c": 1}, reverse=True) [3, 2, 1] >>> sort_by([{"a": 2}, {"a": 3}, {"a": 1}], "a") [{'a': 1}, {'a': 2}, {'a': 3}] .. versionadded:: 1.0.0 """ if isinstance(collection, dict): collection = collection.values() return sorted(collection, key=pyd.iteratee(iteratee), reverse=reverse) # # Utility methods not a part of the main API # def itermap( collection: t.Iterable[t.Any], iteratee: t.Union[t.Callable[..., t.Any], IterateeObjT, None] = None, ) -> t.Generator[t.Any, None, None]: """Generative mapper.""" for result in iteriteratee(collection, iteratee): yield result[0] pydash-8.0.3/src/pydash/exceptions.py000066400000000000000000000007241464745015500176420ustar00rootroot00000000000000""" Exception classes. .. versionadded:: 1.0.0 """ from __future__ import annotations __all__ = ("InvalidMethod",) # NOTE: This needs to subclass AttributeError due to compatibility with typing.Protocol and # runtime_checkable. See https://github.com/dgilland/pydash/issues/165 class InvalidMethod(AttributeError): """ Raised when an invalid pydash method is invoked through :func:`pydash.chaining.chain`. .. versionadded:: 1.0.0 """ pass pydash-8.0.3/src/pydash/functions.py000066400000000000000000001174061464745015500174770ustar00rootroot00000000000000""" Functions that wrap other functions. .. versionadded:: 1.0.0 """ from __future__ import annotations from functools import cached_property from inspect import getfullargspec import itertools import time import typing as t from typing_extensions import Concatenate, Literal, ParamSpec, Protocol import pydash as pyd from pydash.helpers import getargcount __all__ = ( "after", "ary", "before", "conjoin", "curry", "curry_right", "debounce", "delay", "disjoin", "flip", "flow", "flow_right", "iterated", "juxtapose", "negate", "once", "over_args", "partial", "partial_right", "rearg", "spread", "throttle", "unary", "wrap", ) T = t.TypeVar("T") T1 = t.TypeVar("T1") T2 = t.TypeVar("T2") T3 = t.TypeVar("T3") T4 = t.TypeVar("T4") T5 = t.TypeVar("T5") P = ParamSpec("P") class _WithArgCount(Protocol): func: t.Callable[..., t.Any] @cached_property def _argcount(self) -> t.Optional[int]: return getargcount(self.func, None) class After(_WithArgCount, t.Generic[P, T]): """Wrap a function in an after context.""" def __init__(self, func: t.Callable[P, T], n: t.SupportsInt) -> None: try: n = int(n) assert n >= 0 except (ValueError, TypeError, AssertionError): n = 0 self.n = n self.func = func def __call__(self, *args: P.args, **kwargs: P.kwargs) -> t.Union[T, None]: """Return results of :attr:`func` after :attr:`n` calls.""" self.n -= 1 if self.n <= 0: return self.func(*args, **kwargs) return None class Ary(_WithArgCount, t.Generic[T]): """Wrap a function in an ary context.""" def __init__(self, func: t.Callable[..., T], n: t.Union[t.SupportsInt, None]) -> None: try: # Type error would be caught n = int(n) # type: ignore assert n >= 0 except (ValueError, TypeError, AssertionError): n = None self.n = n self.func = func def __call__(self, *args: t.Any, **kwargs: t.Any) -> T: """ Return results of :attr:`func` with arguments capped to :attr:`n`. Only positional arguments are capped. Any number of keyword arguments are allowed. 
""" cut_args = args[: self.n] if self.n is not None else args return self.func(*cut_args, **kwargs) # type: ignore class Before(After[P, T], t.Generic[P, T]): """Wrap a function in a before context.""" def __call__(self, *args: P.args, **kwargs: P.kwargs) -> t.Union[T, None]: self.n -= 1 if self.n > 0: return self.func(*args, **kwargs) return None class Flow(t.Generic[P, T]): """Wrap a function in a flow context.""" @t.overload def __init__( self, func1: t.Callable[P, T2], func2: t.Callable[[T2], T3], func3: t.Callable[[T3], T4], func4: t.Callable[[T4], T5], func5: t.Callable[[T5], T], *, from_right: bool = True, ) -> None: ... @t.overload def __init__( self, func1: t.Callable[P, T2], func2: t.Callable[[T2], T3], func3: t.Callable[[T3], T4], func4: t.Callable[[T4], T], *, from_right: bool = True, ) -> None: ... @t.overload def __init__( self, func1: t.Callable[P, T2], func2: t.Callable[[T2], T3], func3: t.Callable[[T3], T], *, from_right: bool = True, ) -> None: ... @t.overload def __init__( self, func1: t.Callable[P, T2], func2: t.Callable[[T2], T], *, from_right: bool = True ) -> None: ... @t.overload def __init__(self, func1: t.Callable[P, T], *, from_right: bool = True) -> None: ... def __init__(self, *funcs, from_right: bool = True) -> None: # type: ignore self.funcs = funcs self._from_index = -1 if from_right else 0 def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T: """Return results of composing :attr:`funcs`.""" funcs = list(self.funcs) result = None while funcs: result = funcs.pop(self._from_index)(*args, **kwargs) # Incompatible type in assignements but needed here # type safety is ensured from the `__init__` signature args = (result,) # type: ignore kwargs = {} # type: ignore # type safety is ensured from the `__init__` signature return result # type: ignore @cached_property def _argcount(self) -> t.Optional[int]: return getargcount(self.funcs[self._from_index], None) class Conjoin(t.Generic[T]): """Wrap a set of functions in a conjoin context.""" def __init__(self, *funcs: t.Callable[[T], t.Any]) -> None: self.funcs = funcs def __call__(self, obj: t.Iterable[T]) -> bool: """Return result of conjoin `obj` with :attr:`funcs` predicates.""" def iteratee(item: T) -> bool: return pyd.every(self.funcs, lambda func: func(item)) return pyd.every(obj, iteratee) class Curry(t.Generic[T1, T]): """Wrap a function in a curry context.""" def __init__(self, func, arity, args=None, kwargs=None) -> None: self.func = func self.arity = len(getfullargspec(func).args) if arity is None else arity self.args = () if args is None else args self.kwargs = {} if kwargs is None else kwargs def __call__(self, *args, **kwargs): """Store `args` and `kwargs` and call :attr:`func` if we've reached or exceeded the function arity.""" args = self.compose_args(args) kwargs.update(self.kwargs) if (len(args) + len(kwargs)) >= self.arity: args_arity = self.arity - len(kwargs) args = args[: (args_arity if args_arity > 0 else 0)] curried = self.func(*args, **kwargs) else: # NOTE: Use self.__class__ so that subclasses will use their own # class to generate next iteration of call. 
curried = self.__class__(self.func, self.arity, args, kwargs) return curried def compose_args(self, new_args): """Combine `self.args` with `new_args` and return.""" return tuple(list(self.args) + list(new_args)) @cached_property def _argcount(self) -> t.Optional[int]: argcount = self.arity - len(self.args) - len(self.kwargs) return argcount if argcount >= 0 else None class CurryOne(Curry[T1, T]): def __call__(self, arg_one: T1) -> T: return super().__call__(arg_one) # pragma: no cover class CurryTwo(Curry[T1, CurryOne[T2, T]]): @t.overload def __call__(self, arg_one: T1) -> CurryOne[T2, T]: ... @t.overload def __call__(self, arg_one: T1, arg_two: T2) -> T: ... def __call__(self, *args, **kwargs): return super().__call__(*args, **kwargs) # pragma: no cover class CurryThree(Curry[T1, CurryTwo[T2, T3, T]]): @t.overload def __call__(self, arg_one: T1) -> CurryTwo[T2, T3, T]: ... @t.overload def __call__(self, arg_one: T1, arg_two: T2) -> CurryOne[T3, T]: ... @t.overload def __call__(self, arg_one: T1, arg_two: T2, arg_three: T3) -> T: ... def __call__(self, *args, **kwargs): return super().__call__(*args, **kwargs) # pragma: no cover class CurryFour(Curry[T1, CurryThree[T2, T3, T4, T]]): @t.overload def __call__(self, arg_one: T1) -> CurryThree[T2, T3, T4, T]: ... @t.overload def __call__(self, arg_one: T1, arg_two: T2) -> CurryTwo[T3, T4, T]: ... @t.overload def __call__(self, arg_one: T1, arg_two: T2, arg_three: T3) -> CurryOne[T4, T]: ... @t.overload def __call__(self, arg_one: T1, arg_two: T2, arg_three: T3, arg_four: T4) -> T: ... def __call__(self, *args, **kwargs): return super().__call__(*args, **kwargs) # pragma: no cover class CurryFive(Curry[T1, CurryFour[T2, T3, T4, T5, T]]): @t.overload def __call__(self, arg_one: T1) -> CurryFour[T2, T3, T4, T5, T]: ... @t.overload def __call__(self, arg_one: T1, arg_two: T2) -> CurryThree[T3, T4, T5, T]: ... @t.overload def __call__(self, arg_one: T1, arg_two: T2, arg_three: T3) -> CurryTwo[T4, T5, T]: ... @t.overload def __call__( self, arg_one: T1, arg_two: T2, arg_three: T3, arg_four: T4 ) -> CurryOne[T5, T]: ... @t.overload def __call__( self, arg_one: T1, arg_two: T2, arg_three: T3, arg_four: T4, arg_five: T5 ) -> T: ... def __call__(self, *args, **kwargs): return super().__call__(*args, **kwargs) # pragma: no cover class CurryRight(Curry[T5, T]): """Wrap a function in a curry-right context.""" def compose_args(self, new_args): return tuple(list(new_args) + list(self.args)) class CurryRightOne(CurryRight[T5, T]): def __call__(self, arg_one: T5) -> T: return super().__call__(arg_one) # pragma: no cover class CurryRightTwo(CurryRight[T5, CurryRightOne[T4, T]]): @t.overload def __call__(self, arg_one: T5) -> CurryRightOne[T4, T]: ... @t.overload def __call__(self, arg_one: T5, arg_two: T4) -> T: ... def __call__(self, *args, **kwargs): return super().__call__(*args, **kwargs) # pragma: no cover class CurryRightThree(CurryRight[T5, CurryRightTwo[T4, T3, T]]): @t.overload def __call__(self, arg_one: T5) -> CurryRightTwo[T4, T3, T]: ... @t.overload def __call__(self, arg_one: T5, arg_two: T4) -> CurryRightOne[T3, T]: ... @t.overload def __call__(self, arg_one: T5, arg_two: T4, arg_three: T3) -> T: ... def __call__(self, *args, **kwargs): return super().__call__(*args, **kwargs) # pragma: no cover class CurryRightFour(CurryRight[T5, CurryRightThree[T4, T3, T2, T]]): @t.overload def __call__(self, arg_one: T5) -> CurryRightThree[T4, T3, T2, T]: ... @t.overload def __call__(self, arg_one: T5, arg_two: T4) -> CurryRightTwo[T3, T2, T]: ... 
@t.overload def __call__(self, arg_one: T5, arg_two: T4, arg_three: T3) -> CurryRightOne[T2, T]: ... @t.overload def __call__(self, arg_one: T5, arg_two: T4, arg_three: T3, arg_four: T2) -> T: ... def __call__(self, *args, **kwargs): return super().__call__(*args, **kwargs) # pragma: no cover class CurryRightFive(CurryRight[T5, CurryRightFour[T4, T3, T2, T1, T]]): @t.overload def __call__(self, arg_one: T5) -> CurryRightFour[T4, T3, T2, T1, T]: ... @t.overload def __call__(self, arg_one: T5, arg_two: T4) -> CurryRightThree[T3, T2, T1, T]: ... @t.overload def __call__(self, arg_one: T5, arg_two: T4, arg_three: T3) -> CurryRightTwo[T2, T1, T]: ... @t.overload def __call__( self, arg_one: T5, arg_two: T4, arg_three: T3, arg_four: T2 ) -> CurryRightOne[T1, T]: ... @t.overload def __call__( self, arg_one: T5, arg_two: T4, arg_three: T3, arg_four: T2, arg_five: T1 ) -> T: ... def __call__(self, *args, **kwargs): return super().__call__(*args, **kwargs) # pragma: no cover class Debounce(_WithArgCount, t.Generic[P, T]): """Wrap a function in a debounce context.""" def __init__( self, func: t.Callable[P, T], wait: int, max_wait: t.Union[int, Literal[False]] = False ) -> None: self.func = func self.wait = wait self.max_wait = max_wait self.last_result: t.Union[T, None] = None # Initialize last_* times to be prior to the wait periods so that func # is primed to be executed on first call. self.last_call = pyd.now() - self.wait self.last_execution = pyd.now() - max_wait if pyd.is_number(max_wait) else None def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T: """ Execute :attr:`func` if function hasn't been called within last :attr:`wait` milliseconds or in last :attr:`max_wait` milliseconds. Return results of last successful call. """ present = pyd.now() if (present - self.last_call) >= self.wait or ( self.max_wait and (present - self.last_execution) >= self.max_wait # type: ignore ): self.last_result = self.func(*args, **kwargs) self.last_execution = present self.last_call = present # It will be set after first call, cannot be `None` anymore return self.last_result # type: ignore class Disjoin(t.Generic[T]): """Wrap a set of functions in a disjoin context.""" def __init__(self, *funcs: t.Callable[[T], t.Any]) -> None: self.funcs = funcs def __call__(self, obj: t.Iterable[T]) -> bool: """Return result of disjoin `obj` with :attr:`funcs` predicates.""" def iteratee(item: T) -> bool: return pyd.some(self.funcs, lambda func: func(item)) return pyd.some(obj, iteratee) class Flip(_WithArgCount): """Wrap a function in a flip context.""" def __init__(self, func: t.Callable[..., t.Any]) -> None: self.func = func def __call__(self, *args, **kwargs): return self.func(*reversed(args), **kwargs) class Iterated(t.Generic[T]): """Wrap a function in an iterated context.""" def __init__(self, func: t.Callable[[T], T]) -> None: self.func = func def _iteration(self, initial: T) -> t.Iterator[T]: """Iterator that composing :attr:`func` with itself.""" value = initial while True: value = self.func(value) yield value def __call__(self, initial: T, n: int) -> T: """Return value of calling :attr:`func` `n` times using `initial` as seed value.""" value = initial iteration = self._iteration(value) for _ in range(n): value = next(iteration) return value class Juxtapose(t.Generic[P, T]): """Wrap a function in a juxtapose context.""" def __init__(self, *funcs: t.Callable[P, T]) -> None: self.funcs = funcs def __call__(self, *objs: P.args, **kwargs: P.kwargs) -> t.List[T]: return [func(*objs, **kwargs) for func in 
self.funcs] @cached_property def _argcount(self) -> t.Optional[int]: return getargcount(self.funcs[0], None) if self.funcs else None class OverArgs(_WithArgCount): """Wrap a function in an over_args context.""" def __init__(self, func: t.Callable[..., t.Any], *transforms: t.Callable[..., t.Any]) -> None: self.func = func self.transforms = pyd.flatten(transforms) def __call__(self, *args): args = (self.transforms[idx](args) for idx, args in enumerate(args)) return self.func(*args) class Negate(_WithArgCount, t.Generic[P]): """Wrap a function in a negate context.""" def __init__(self, func: t.Callable[P, t.Any]) -> None: self.func = func def __call__(self, *args: P.args, **kwargs: P.kwargs) -> bool: """Return negated results of calling :attr:`func`.""" return not self.func(*args, **kwargs) class Once(_WithArgCount, t.Generic[P, T]): """Wrap a function in a once context.""" def __init__(self, func: t.Callable[P, T]) -> None: self.func = func self.result: t.Union[T, None] = None self.called = False def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T: """Return results from the first call of :attr:`func`.""" if not self.called: self.result = self.func(*args, **kwargs) self.called = True # At this point the result will be set, cannot be `None` anymore return self.result # type: ignore class Partial(_WithArgCount, t.Generic[T]): """Wrap a function in a partial context.""" def __init__( self, func: t.Callable[..., T], args: t.Any, kwargs: t.Any = None, from_right: bool = False ) -> None: self.func = func self.args = args self.kwargs = kwargs or {} self.from_right = from_right def __call__(self, *args: t.Any, **kwargs: t.Any) -> T: """ Return results from :attr:`func` with :attr:`args` + `args`. Apply arguments from left or right depending on :attr:`from_right`. """ if self.from_right: args = itertools.chain(args, self.args) # type: ignore else: args = itertools.chain(self.args, args) # type: ignore kwargs = {**self.kwargs, **kwargs} return self.func(*args, **kwargs) @cached_property def _argcount(self) -> t.Optional[int]: func_argcount = getargcount(self.func, None) if func_argcount is None: return None argcount = func_argcount - len(self.args) - len(self.kwargs) return argcount if argcount >= 0 else None class Rearg(_WithArgCount, t.Generic[P, T]): """Wrap a function in a rearg context.""" def __init__(self, func: t.Callable[P, T], *indexes: int) -> None: self.func = func # Index `indexes` by the index value, so we can do a lookup mapping by walking the function # arguments. self.indexes = { src_index: dest_index for dest_index, src_index in enumerate(pyd.flatten(indexes)) } def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T: """Return results from :attr:`func` using rearranged arguments.""" reargs = {} rest = [] # Walk arguments to ensure each one is added to the final argument list. for src_index, arg in enumerate(args): # NOTE: dest_index will range from 0 to len(indexes). dest_index = self.indexes.get(src_index) if dest_index is not None: # Remap argument index. reargs[dest_index] = arg else: # Argumnet index is not contained in `indexes` so stick in the back. 
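                # Illustrative: for rearg(f, 1, 2)(a, b, c), `indexes` is {1: 0, 2: 1}, so `b`
                # and `c` are remapped to the front while `a` (source index 0) falls through
                # to `rest` and is appended after them, i.e. f(b, c, a).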
rest.append(arg) args = itertools.chain((reargs[key] for key in sorted(reargs)), rest) # type: ignore return self.func(*args, **kwargs) class Spread(t.Generic[T]): """Wrap a function in a spread context.""" def __init__(self, func: t.Callable[..., T]) -> None: self.func = func def __call__(self, args: t.Iterable[t.Any]) -> T: """Return results from :attr:`func` using array of `args` provided.""" return self.func(*args) class Throttle(_WithArgCount, t.Generic[P, T]): """Wrap a function in a throttle context.""" def __init__(self, func: t.Callable[P, T], wait: int) -> None: self.func = func self.wait = wait self.last_result: t.Union[T, None] = None self.last_execution = pyd.now() - self.wait def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T: """ Execute :attr:`func` if function hasn't been called within last :attr:`wait` milliseconds. Return results of last successful call. """ present = pyd.now() if (present - self.last_execution) >= self.wait: self.last_result = self.func(*args, **kwargs) self.last_execution = present # The last result will be filled on first execution, so it is always `T` return self.last_result # type: ignore def after(func: t.Callable[P, T], n: t.SupportsInt) -> After[P, T]: """ Creates a function that executes `func`, with the arguments of the created function, only after being called `n` times. Args: func: Function to execute. n: Number of times `func` must be called before it is executed. Returns: Function wrapped in an :class:`After` context. Example: >>> func = lambda a, b, c: (a, b, c) >>> after_func = after(func, 3) >>> after_func(1, 2, 3) >>> after_func(1, 2, 3) >>> after_func(1, 2, 3) (1, 2, 3) >>> after_func(4, 5, 6) (4, 5, 6) .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0 Reordered arguments to make `func` first. """ return After(func, n) def ary(func: t.Callable[..., T], n: t.Union[t.SupportsInt, None]) -> Ary[T]: """ Creates a function that accepts up to `n` arguments ignoring any additional arguments. Only positional arguments are capped. All keyword arguments are allowed through. Args: func: Function to cap arguments for. n: Number of arguments to accept. Returns: Function wrapped in an :class:`Ary` context. Example: >>> func = lambda a, b, c=0, d=5: (a, b, c, d) >>> ary_func = ary(func, 2) >>> ary_func(1, 2, 3, 4, 5, 6) (1, 2, 0, 5) >>> ary_func(1, 2, 3, 4, 5, 6, c=10, d=20) (1, 2, 10, 20) .. versionadded:: 3.0.0 """ return Ary(func, n) def before(func: t.Callable[P, T], n: t.SupportsInt) -> Before[P, T]: """ Creates a function that executes `func`, with the arguments of the created function, until it has been called `n` times. Args: func: Function to execute. n: Number of times `func` may be executed. Returns: Function wrapped in an :class:`Before` context. Example: >>> func = lambda a, b, c: (a, b, c) >>> before_func = before(func, 3) >>> before_func(1, 2, 3) (1, 2, 3) >>> before_func(1, 2, 3) (1, 2, 3) >>> before_func(1, 2, 3) >>> before_func(1, 2, 3) .. versionadded:: 1.1.0 .. versionchanged:: 3.0.0 Reordered arguments to make `func` first. """ return Before(func, n) def conjoin(*funcs: t.Callable[[T], t.Any]) -> t.Callable[[t.Iterable[T]], bool]: """ Creates a function that composes multiple predicate functions into a single predicate that tests whether **all** elements of an object pass each predicate. Args: *funcs: Function(s) to conjoin. Returns: Function(s) wrapped in a :class:`Conjoin` context. 
Example: >>> conjoiner = conjoin(lambda x: isinstance(x, int), lambda x: x > 3) >>> conjoiner([1, 2, 3]) False >>> conjoiner([1.0, 2, 1]) False >>> conjoiner([4.0, 5, 6]) False >>> conjoiner([4, 5, 6]) True .. versionadded:: 2.0.0 """ return Conjoin(*funcs) @t.overload def curry(func: t.Callable[[T1], T], arity: t.Union[int, None] = None) -> CurryOne[T1, T]: ... @t.overload def curry( func: t.Callable[[T1, T2], T], arity: t.Union[int, None] = None ) -> CurryTwo[T1, T2, T]: ... @t.overload def curry( func: t.Callable[[T1, T2, T3], T], arity: t.Union[int, None] = None ) -> CurryThree[T1, T2, T3, T]: ... @t.overload def curry( func: t.Callable[[T1, T2, T3, T4], T], arity: t.Union[int, None] = None ) -> CurryFour[T1, T2, T3, T4, T]: ... @t.overload def curry( func: t.Callable[[T1, T2, T3, T4, T5], T], arity: t.Union[int, None] = None ) -> CurryFive[T1, T2, T3, T4, T5, T]: ... def curry(func, arity=None): """ Creates a function that accepts one or more arguments of `func` that when invoked either executes `func` returning its result (if all `func` arguments have been provided) or returns a function that accepts one or more of the remaining `func` arguments, and so on. Args: func: Function to curry. arity: Number of function arguments that can be accepted by curried function. Default is to use the number of arguments that are accepted by `func`. Returns: Function wrapped in a :class:`Curry` context. Example: >>> func = lambda a, b, c: (a, b, c) >>> currier = curry(func) >>> currier = currier(1) >>> assert isinstance(currier, Curry) >>> currier = currier(2) >>> assert isinstance(currier, Curry) >>> currier = currier(3) >>> currier (1, 2, 3) .. versionadded:: 1.0.0 """ return Curry(func, arity) @t.overload def curry_right( func: t.Callable[[T1], T], arity: t.Union[int, None] = None ) -> CurryRightOne[T1, T]: ... @t.overload def curry_right( func: t.Callable[[T1, T2], T], arity: t.Union[int, None] = None ) -> CurryRightTwo[T2, T1, T]: ... @t.overload def curry_right( func: t.Callable[[T1, T2, T3], T], arity: t.Union[int, None] = None ) -> CurryRightThree[T3, T2, T1, T]: ... @t.overload def curry_right( func: t.Callable[[T1, T2, T3, T4], T], arity: t.Union[int, None] = None ) -> CurryRightFour[T4, T3, T2, T1, T]: ... @t.overload def curry_right( func: t.Callable[[T1, T2, T3, T4, T5], T], ) -> CurryRightFive[T5, T4, T3, T2, T1, T]: ... def curry_right(func, arity=None): """ This method is like :func:`curry` except that arguments are applied to `func` in the manner of :func:`partial_right` instead of :func:`partial`. Args: func: Function to curry. arity: Number of function arguments that can be accepted by curried function. Default is to use the number of arguments that are accepted by `func`. Returns: Function wrapped in a :class:`CurryRight` context. Example: >>> func = lambda a, b, c: (a, b, c) >>> currier = curry_right(func) >>> currier = currier(1) >>> assert isinstance(currier, CurryRight) >>> currier = currier(2) >>> assert isinstance(currier, CurryRight) >>> currier = currier(3) >>> currier (3, 2, 1) .. versionadded:: 1.1.0 """ return CurryRight(func, arity) def debounce( func: t.Callable[P, T], wait: int, max_wait: t.Union[int, Literal[False]] = False ) -> Debounce[P, T]: """ Creates a function that will delay the execution of `func` until after `wait` milliseconds have elapsed since the last time it was invoked. Subsequent calls to the debounced function will return the result of the last `func` call. Args: func: Function to execute. wait: Milliseconds to wait before executing `func`. 
max_wait (optional): Maximum time to wait before executing `func`. Returns: Function wrapped in a :class:`Debounce` context. .. versionadded:: 1.0.0 """ return Debounce(func, wait, max_wait=max_wait) def delay(func: t.Callable[P, T], wait: int, *args: "P.args", **kwargs: "P.kwargs") -> T: """ Executes the `func` function after `wait` milliseconds. Additional arguments will be provided to `func` when it is invoked. Args: func: Function to execute. wait: Milliseconds to wait before executing `func`. *args: Arguments to pass to `func`. **kwargs: Keyword arguments to pass to `func`. Returns: Return from `func`. .. versionadded:: 1.0.0 """ time.sleep(wait / 1000.0) return func(*args, **kwargs) def disjoin(*funcs: t.Callable[[T], t.Any]) -> Disjoin[T]: """ Creates a function that composes multiple predicate functions into a single predicate that tests whether **any** elements of an object pass each predicate. Args: *funcs: Function(s) to disjoin. Returns: Function(s) wrapped in a :class:`Disjoin` context. Example: >>> disjoiner = disjoin(lambda x: isinstance(x, float),\ lambda x: isinstance(x, int)) >>> disjoiner([1, '2', '3']) True >>> disjoiner([1.0, '2', '3']) True >>> disjoiner(['1', '2', '3']) False .. versionadded:: 2.0.0 """ return Disjoin(*funcs) @t.overload def flip(func: t.Callable[[T1, T2, T3, T4, T5], T]) -> t.Callable[[T5, T4, T3, T2, T1], T]: ... @t.overload def flip(func: t.Callable[[T1, T2, T3, T4], T]) -> t.Callable[[T4, T3, T2, T1], T]: ... @t.overload def flip(func: t.Callable[[T1, T2, T3], T]) -> t.Callable[[T3, T2, T1], T]: ... @t.overload def flip(func: t.Callable[[T1, T2], T]) -> t.Callable[[T2, T1], T]: ... @t.overload def flip(func: t.Callable[[T1], T]) -> t.Callable[[T1], T]: ... def flip(func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: """ Creates a function that invokes the method with arguments reversed. Args: func: Function to flip arguments for. Returns: Function wrapped in a :class:`Flip` context. Example: >>> flipped = flip(lambda *args: args) >>> flipped(1, 2, 3, 4) (4, 3, 2, 1) >>> flipped = flip(lambda *args: [i * 2 for i in args]) >>> flipped(1, 2, 3, 4) [8, 6, 4, 2] .. versionadded:: 4.0.0 """ return Flip(func) @t.overload def flow( func1: t.Callable[P, T2], func2: t.Callable[[T2], T3], func3: t.Callable[[T3], T4], func4: t.Callable[[T4], T5], func5: t.Callable[[T5], T], ) -> Flow[P, T]: ... @t.overload def flow( func1: t.Callable[P, T2], func2: t.Callable[[T2], T3], func3: t.Callable[[T3], T4], func4: t.Callable[[T4], T], ) -> Flow[P, T]: ... @t.overload def flow( func1: t.Callable[P, T2], func2: t.Callable[[T2], T3], func3: t.Callable[[T3], T], ) -> Flow[P, T]: ... @t.overload def flow(func1: t.Callable[P, T2], func2: t.Callable[[T2], T]) -> Flow[P, T]: ... @t.overload def flow(func1: t.Callable[P, T]) -> Flow[P, T]: ... def flow(*funcs): """ Creates a function that is the composition of the provided functions, where each successive invocation is supplied the return value of the previous. For example, composing the functions ``f()``, ``g()``, and ``h()`` produces ``h(g(f()))``. Args: *funcs: Function(s) to compose. Returns: Function(s) wrapped in a :class:`Flow` context. Example: >>> mult_5 = lambda x: x * 5 >>> div_10 = lambda x: x / 10.0 >>> pow_2 = lambda x: x**2 >>> ops = flow(sum, mult_5, div_10, pow_2) >>> ops([1, 2, 3, 4]) 25.0 .. versionadded:: 2.0.0 .. versionchanged:: 2.3.1 Added :func:`pipe` as alias. .. versionchanged:: 4.0.0 Removed alias ``pipe``. 
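
    Example (an additional illustrative composition; ``zip`` and ``list`` are just
    convenient builtins to compose):

        >>> pair_up = flow(zip, list)
        >>> pair_up([1, 2], ["a", "b"])
        [(1, 'a'), (2, 'b')]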
""" return Flow(*funcs, from_right=False) @t.overload def flow_right( func5: t.Callable[[T4], T], func4: t.Callable[[T3], T4], func3: t.Callable[[T2], T3], func2: t.Callable[[T1], T2], func1: t.Callable[P, T1], ) -> Flow[P, T]: ... @t.overload def flow_right( func4: t.Callable[[T3], T], func3: t.Callable[[T2], T3], func2: t.Callable[[T1], T2], func1: t.Callable[P, T1], ) -> Flow[P, T]: ... @t.overload def flow_right( func3: t.Callable[[T2], T], func2: t.Callable[[T1], T2], func1: t.Callable[P, T1], ) -> Flow[P, T]: ... @t.overload def flow_right(func2: t.Callable[[T1], T], func1: t.Callable[P, T1]) -> Flow[P, T]: ... @t.overload def flow_right(func1: t.Callable[P, T]) -> Flow[P, T]: ... def flow_right(*funcs): """ This function is like :func:`flow` except that it creates a function that invokes the provided functions from right to left. For example, composing the functions ``f()``, ``g()``, and ``h()`` produces ``f(g(h()))``. Args: *funcs: Function(s) to compose. Returns: Function(s) wrapped in a :class:`Flow` context. Example: >>> mult_5 = lambda x: x * 5 >>> div_10 = lambda x: x / 10.0 >>> pow_2 = lambda x: x**2 >>> ops = flow_right(mult_5, div_10, pow_2, sum) >>> ops([1, 2, 3, 4]) 50.0 .. versionadded:: 1.0.0 .. versionchanged:: 2.0.0 Added :func:`flow_right` and made :func:`compose` an alias. .. versionchanged:: 2.3.1 Added :func:`pipe_right` as alias. .. versionchanged:: 4.0.0 Removed aliases ``pipe_right`` and ``compose``. """ return Flow(*funcs, from_right=True) def iterated(func: t.Callable[[T], T]) -> Iterated[T]: """ Creates a function that is composed with itself. Each call to the iterated function uses the previous function call's result as input. Returned :class:`Iterated` instance can be called with ``(initial, n)`` where `initial` is the initial value to seed `func` with and `n` is the number of times to call `func`. Args: func: Function to iterate. Returns: Function wrapped in a :class:`Iterated` context. Example: >>> doubler = iterated(lambda x: x * 2) >>> doubler(4, 5) 128 >>> doubler(3, 9) 1536 .. versionadded:: 2.0.0 """ return Iterated(func) def juxtapose(*funcs: t.Callable[P, T]) -> Juxtapose[P, T]: """ Creates a function whose return value is a list of the results of calling each `funcs` with the supplied arguments. Args: *funcs: Function(s) to juxtapose. Returns: Function wrapped in a :class:`Juxtapose` context. Example: >>> double = lambda x: x * 2 >>> triple = lambda x: x * 3 >>> quadruple = lambda x: x * 4 >>> juxtapose(double, triple, quadruple)(5) [10, 15, 20] .. versionadded:: 2.0.0 """ return Juxtapose(*funcs) def negate(func: t.Callable[P, t.Any]) -> Negate[P]: """ Creates a function that negates the result of the predicate `func`. The `func` function is executed with the arguments of the created function. Args: func: Function to negate execute. Returns: Function wrapped in a :class:`Negate` context. Example: >>> not_is_number = negate(lambda x: isinstance(x, (int, float))) >>> not_is_number(1) False >>> not_is_number("1") True .. versionadded:: 1.1.0 """ return Negate(func) def once(func: t.Callable[P, T]) -> Once[P, T]: """ Creates a function that is restricted to execute `func` once. Repeat calls to the function will return the value of the first call. Args: func: Function to execute. Returns: Function wrapped in a :class:`Once` context. Example: >>> oncer = once(lambda *args: args[0]) >>> oncer(5) 5 >>> oncer(6) 5 .. 
versionadded:: 1.0.0 """ return Once(func) @t.overload def over_args( func: t.Callable[[T1, T2, T3, T4, T5], T], transform_one: t.Callable[[T1], T1], transform_two: t.Callable[[T2], T2], transform_three: t.Callable[[T3], T3], transform_four: t.Callable[[T4], T4], transform_five: t.Callable[[T5], T5], ) -> t.Callable[[T1, T2, T3, T4, T5], T]: ... @t.overload def over_args( func: t.Callable[[T1, T2, T3, T4], T], transform_one: t.Callable[[T1], T1], transform_two: t.Callable[[T2], T2], transform_three: t.Callable[[T3], T3], transform_four: t.Callable[[T4], T4], ) -> t.Callable[[T1, T2, T3, T4], T]: ... @t.overload def over_args( func: t.Callable[[T1, T2, T3], T], transform_one: t.Callable[[T1], T1], transform_two: t.Callable[[T2], T2], transform_three: t.Callable[[T3], T3], ) -> t.Callable[[T1, T2, T3], T]: ... @t.overload def over_args( func: t.Callable[[T1, T2], T], transform_one: t.Callable[[T1], T1], transform_two: t.Callable[[T2], T2], ) -> t.Callable[[T1, T2], T]: ... @t.overload def over_args( func: t.Callable[[T1], T], transform_one: t.Callable[[T1], T1], ) -> t.Callable[[T1], T]: ... def over_args(func, *transforms): """ Creates a function that runs each argument through a corresponding transform function. Args: func: Function to wrap. *transforms: Functions to transform arguments, specified as individual functions or lists of functions. Returns: Function wrapped in a :class:`OverArgs` context. Example: >>> squared = lambda x: x**2 >>> double = lambda x: x * 2 >>> modder = over_args(lambda x, y: [x, y], squared, double) >>> modder(5, 10) [25, 20] .. versionadded:: 3.3.0 .. versionchanged:: 4.0.0 Renamed from ``mod_args`` to ``over_args``. """ return OverArgs(func, *transforms) def partial(func: t.Callable[..., T], *args: t.Any, **kwargs: t.Any) -> Partial[T]: """ Creates a function that, when called, invokes `func` with any additional partial arguments prepended to those provided to the new function. Args: func: Function to execute. *args: Partial arguments to prepend to function call. **kwargs: Partial keyword arguments to bind to function call. Returns: Function wrapped in a :class:`Partial` context. Example: >>> dropper = partial(lambda array, n: array[n:], [1, 2, 3, 4]) >>> dropper(2) [3, 4] >>> dropper(1) [2, 3, 4] >>> myrest = partial(lambda array, n: array[n:], n=1) >>> myrest([1, 2, 3, 4]) [2, 3, 4] .. versionadded:: 1.0.0 """ return Partial(func, args, kwargs) def partial_right(func: t.Callable[..., T], *args: t.Any, **kwargs: t.Any) -> Partial[T]: """ This method is like :func:`partial` except that partial arguments are appended to those provided to the new function. Args: func: Function to execute. *args: Partial arguments to append to function call. **kwargs: Partial keyword arguments to bind to function call. Returns: Function wrapped in a :class:`Partial` context. Example: >>> myrest = partial_right(lambda array, n: array[n:], 1) >>> myrest([1, 2, 3, 4]) [2, 3, 4] .. versionadded:: 1.0.0 """ return Partial(func, args, kwargs, from_right=True) def rearg(func: t.Callable[P, T], *indexes: int) -> Rearg[P, T]: """ Creates a function that invokes `func` with arguments arranged according to the specified indexes where the argument value at the first index is provided as the first argument, the argument value at the second index is provided as the second argument, and so on. Args: func: Function to rearrange arguments for. *indexes: The arranged argument indexes. Returns: Function wrapped in a :class:`Rearg` context. 
Example: >>> jumble = rearg(lambda *args: args, 1, 2, 3) >>> jumble(1, 2, 3) (2, 3, 1) >>> jumble("a", "b", "c", "d", "e") ('b', 'c', 'd', 'a', 'e') .. versionadded:: 3.0.0 """ return Rearg(func, *indexes) def spread(func: t.Callable[..., T]) -> Spread[T]: """ Creates a function that invokes `func` with the array of arguments provided to the created function. Args: func: Function to spread. Returns: Function wrapped in a :class:`Spread` context. Example: >>> greet = spread(lambda *people: "Hello " + ", ".join(people) + "!") >>> greet(["Mike", "Don", "Leo"]) 'Hello Mike, Don, Leo!' .. versionadded:: 3.1.0 """ return Spread(func) def throttle(func: t.Callable[P, T], wait: int) -> Throttle[P, T]: """ Creates a function that, when executed, will only call the `func` function at most once per every `wait` milliseconds. Subsequent calls to the throttled function will return the result of the last `func` call. Args: func: Function to throttle. wait: Milliseconds to wait before calling `func` again. Returns: Results of last `func` call. .. versionadded:: 1.0.0 """ return Throttle(func, wait) def unary(func: t.Callable[..., T]) -> Ary[T]: """ Creates a function that accepts up to one argument, ignoring any additional arguments. Args: func: Function to cap arguments for. Returns: Function wrapped in an :class:`Ary` context. Example: >>> func = lambda a, b=1, c=0, d=5: (a, b, c, d) >>> unary_func = unary(func) >>> unary_func(1, 2, 3, 4, 5, 6) (1, 1, 0, 5) >>> unary_func(1, 2, 3, 4, 5, 6, b=0, c=10, d=20) (1, 0, 10, 20) .. versionadded:: 4.0.0 """ return Ary(func, 1) def wrap(value: T1, func: t.Callable[Concatenate[T1, P], T]) -> Partial[T]: """ Creates a function that provides value to the wrapper function as its first argument. Additional arguments provided to the function are appended to those provided to the wrapper function. Args: value: Value provided as first argument to function call. func: Function to execute. Returns: Function wrapped in a :class:`Partial` context. Example: >>> wrapper = wrap("hello", lambda *args: args) >>> wrapper(1, 2) ('hello', 1, 2) .. versionadded:: 1.0.0 """ return Partial(func, (value,)) pydash-8.0.3/src/pydash/helpers.py000066400000000000000000000224761464745015500171330ustar00rootroot00000000000000"""Generic utility methods not part of main API.""" from __future__ import annotations import builtins from collections.abc import Hashable, Iterable, Mapping, Sequence from decimal import Decimal from functools import wraps import inspect import operator import warnings import pydash as pyd #: Singleton object that differentiates between an explicit ``None`` value and an unset value. #: As a class so it has its own type class Unset: ... UNSET = Unset() #: Tuple of number types. NUMBER_TYPES = (int, float, Decimal) #: Dictionary of builtins with keys as the builtin function and values as the string name. BUILTINS = {value: key for key, value in builtins.__dict__.items() if isinstance(value, Hashable)} #: Object keys that are restricted from access via path access. 
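#: Attribute-style access to these keys through ``base_get``/``base_set`` raises ``KeyError``
#: (see ``_raise_if_restricted_key`` below), which blocks path traversal into interpreter
#: internals.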
RESTRICTED_KEYS = ("__globals__", "__builtins__") def callit(iteratee, *args, **kwargs): """Inspect argspec of `iteratee` function and only pass the supported arguments when calling it.""" maxargs = len(args) argcount = kwargs["argcount"] if "argcount" in kwargs else getargcount(iteratee, maxargs) argstop = min([maxargs, argcount]) return iteratee(*args[:argstop]) def getargcount(iteratee, maxargs): """Return argument count of iteratee function.""" if hasattr(iteratee, "_argcount"): # Optimization feature where argcount of iteratee is known and properly # set by initiator. # It should always be right, but it can be `None` for the function wrappers # in `pydash.function` as the wrapped functions are out of our control and # can support an unknown number of arguments. argcount = iteratee._argcount return argcount if argcount is not None else maxargs if isinstance(iteratee, type) or pyd.is_builtin(iteratee): # Only pass single argument to type iteratees or builtins. argcount = 1 else: argcount = 1 try: argcount = _getargcount(iteratee, maxargs) except TypeError: # pragma: no cover pass return argcount def _getargcount(iteratee, maxargs): argcount = None try: # PY2: inspect.signature was added in Python 3. # Try to use inspect.signature when possible since it works better for our purpose of # getting the iteratee argcount since it takes into account the "self" argument in callable # classes. sig = inspect.signature(iteratee) except (TypeError, ValueError, AttributeError): # pragma: no cover pass else: if not any( param.kind == inspect.Parameter.VAR_POSITIONAL for param in sig.parameters.values() ): argcount = len(sig.parameters) if argcount is None: # Signatures were added these operator methods in Python 3.12.3 and 3.11.9 but their # instance objects are incorrectly reported as accepting varargs when they only accept a # single argument. if isinstance(iteratee, (operator.itemgetter, operator.attrgetter, operator.methodcaller)): argcount = 1 else: argspec = inspect.getfullargspec(iteratee) if argspec and not argspec.varargs: # pragma: no cover # Use inspected arg count. argcount = len(argspec.args) if argcount is None: # Assume all args are handleable. argcount = maxargs return argcount def iteriteratee(obj, iteratee=None, reverse=False): """Return iterative iteratee based on collection type.""" if iteratee is None: cbk = pyd.identity argcount = 1 else: cbk = pyd.iteratee(iteratee) argcount = getargcount(cbk, maxargs=3) items = iterator(obj) if reverse: items = reversed(tuple(items)) for key, item in items: yield callit(cbk, item, key, obj, argcount=argcount), item, key, obj def iterator(obj): """Return iterative based on object type.""" if isinstance(obj, Mapping): return obj.items() elif hasattr(obj, "iteritems"): return obj.iteritems() # noqa: B301 elif hasattr(obj, "items"): return iter(obj.items()) elif isinstance(obj, Iterable): return enumerate(obj) else: return getattr(obj, "__dict__", {}).items() def base_get(obj, key, default=UNSET): """ Safely get an item by `key` from a sequence or mapping object when `default` provided. Args: obj: Sequence or mapping to retrieve item from. key: Key or index identifying which item to retrieve. default: Default value to return if `key` not found in `obj`. Returns: `obj[key]`, `obj.key`, or `default`. Raises: KeyError: If `obj` is missing key, index, or attribute and no default value provided. 
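
    Example (illustrative; these helpers are internal and not part of the public API):

        >>> base_get({"a": 1}, "a")
        1
        >>> base_get([1, 2, 3], 1)
        2
        >>> base_get({"a": 1}, "b", default=None) is None
        True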
""" if isinstance(obj, dict): value = _base_get_dict(obj, key, default=default) elif not isinstance(obj, (Mapping, Sequence)) or ( isinstance(obj, tuple) and hasattr(obj, "_fields") ): # Don't use getattr for dict/list objects since we don't want class methods/attributes # returned for them but do allow getattr for namedtuple. value = _base_get_object(obj, key, default=default) else: value = _base_get_item(obj, key, default=default) if value is UNSET: # Raise if there's no default provided. raise KeyError(f'Object "{repr(obj)}" does not have key "{key}"') return value def _base_get_dict(obj, key, default=UNSET): value = obj.get(key, UNSET) if value is UNSET: value = default if not isinstance(key, int): # Try integer key fallback. try: value = obj.get(int(key), default) except Exception: pass return value def _base_get_item(obj, key, default=UNSET): try: return obj[key] except Exception: pass if not isinstance(key, int): try: return obj[int(key)] except Exception: pass return default def _base_get_object(obj, key, default=UNSET): value = _base_get_item(obj, key, default=UNSET) if value is UNSET: _raise_if_restricted_key(key) value = default try: value = getattr(obj, key) except Exception: pass return value def _raise_if_restricted_key(key): # Prevent access to restricted keys for security reasons. if key in RESTRICTED_KEYS: raise KeyError(f"access to restricted key {key!r} is not allowed") def base_set(obj, key, value, allow_override=True): """ Set an object's `key` to `value`. If `obj` is a ``list`` and the `key` is the next available index position, append to list; otherwise, pad the list of ``None`` and then append to the list. Args: obj: Object to assign value to. key: Key or index to assign to. value: Value to assign. allow_override: Whether to allow overriding a previously set key. """ if isinstance(obj, dict): if allow_override or key not in obj: obj[key] = value elif isinstance(obj, list): key = int(key) if key < len(obj): if allow_override: obj[key] = value else: if key > len(obj): # Pad list object with None values up to the index key, so we can append the value # into the key index. obj[:] = (obj + [None] * key)[:key] obj.append(value) elif (allow_override or not hasattr(obj, key)) and obj is not None: _raise_if_restricted_key(key) setattr(obj, key, value) return obj def cmp(a, b): # pragma: no cover """ Replacement for built-in function ``cmp`` that was removed in Python 3. Note: Mainly used for comparison during sorting. """ if a is None and b is None: return 0 elif a is None: return -1 elif b is None: return 1 return (a > b) - (a < b) def parse_iteratee(iteratee_keyword, *args, **kwargs): """Try to find iteratee function passed in either as a keyword argument or as the last positional argument in `args`.""" iteratee = kwargs.get(iteratee_keyword) last_arg = args[-1] if iteratee is None and ( callable(last_arg) or isinstance(last_arg, (dict, str)) or last_arg is None ): iteratee = last_arg args = args[:-1] return iteratee, args class iterator_with_default(object): """A wrapper around an iterator object that provides a default.""" def __init__(self, collection, default): self.iter = iter(collection) self.default = default def __iter__(self): return self def next_default(self): ret = self.default self.default = UNSET return ret def __next__(self): ret = next(self.iter, self.next_default()) if ret is UNSET: raise StopIteration return ret next = __next__ def deprecated(func): # pragma: no cover """ This is a decorator which can be used to mark functions as deprecated. 
It will result in a warning being emitted when the function is used. """ @wraps(func) def wrapper(*args, **kwargs): warnings.warn( f"Call to deprecated function {func.__name__}.", category=DeprecationWarning, stacklevel=3, ) return func(*args, **kwargs) return wrapper pydash-8.0.3/src/pydash/numerical.py000066400000000000000000000643731464745015500174520ustar00rootroot00000000000000""" Numerical/mathematical related functions. .. versionadded:: 2.1.0 """ from __future__ import annotations import math import operator import typing as t import pydash as pyd from .helpers import UNSET, Unset, iterator, iterator_with_default, iteriteratee from .types import IterateeObjT, NumberNoDecimalT, NumberT, SupportsMul, SupportsRound if t.TYPE_CHECKING: from decimal import Decimal # pragma: no cover from _typeshed import SupportsAdd, SupportsRichComparisonT, SupportsSub # pragma: no cover __all__ = ( "add", "ceil", "clamp", "divide", "floor", "max_", "max_by", "mean", "mean_by", "median", "min_", "min_by", "moving_mean", "multiply", "power", "round_", "scale", "slope", "std_deviation", "sum_", "sum_by", "subtract", "transpose", "variance", "zscore", ) T = t.TypeVar("T") T2 = t.TypeVar("T2") T3 = t.TypeVar("T3") INFINITY = float("inf") @t.overload def add(a: "SupportsAdd[T, T2]", b: T) -> T2: ... @t.overload def add(a: T, b: "SupportsAdd[T, T2]") -> T2: ... def add(a, b): """ Adds two numbers. Args: a: First number to add. b: Second number to add. Returns: number Example: >>> add(10, 5) 15 .. versionadded:: 2.1.0 .. versionchanged:: 3.3.0 Support adding two numbers when passed as positional arguments. .. versionchanged:: 4.0.0 Only support two argument addition. """ return a + b @t.overload def sum_(collection: t.Mapping[t.Any, "SupportsAdd[int, T]"]) -> T: ... @t.overload def sum_(collection: t.Iterable["SupportsAdd[int, T]"]) -> T: ... def sum_(collection): """ Sum each element in `collection`. Args: collection: Collection to process or first number to add. Returns: Result of summation. Example: >>> sum_([1, 2, 3, 4]) 10 .. versionadded:: 2.1.0 .. versionchanged:: 3.3.0 Support adding two numbers when passed as positional arguments. .. versionchanged:: 4.0.0 Move iteratee support to :func:`sum_by`. Move two argument addition to :func:`add`. """ return sum_by(collection) @t.overload def sum_by( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], "SupportsAdd[int, T3]"], ) -> T3: ... @t.overload def sum_by( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], "SupportsAdd[int, T3]"] ) -> T3: ... @t.overload def sum_by( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], "SupportsAdd[int, T3]"] ) -> T3: ... @t.overload def sum_by( collection: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], "SupportsAdd[int, T2]"] ) -> T2: ... @t.overload def sum_by( collection: t.Iterable[T], iteratee: t.Callable[[T, int], "SupportsAdd[int, T2]"] ) -> T2: ... @t.overload def sum_by(collection: t.Iterable[T], iteratee: t.Callable[[T], "SupportsAdd[int, T2]"]) -> T2: ... @t.overload def sum_by(collection: t.Mapping[t.Any, "SupportsAdd[int, T]"], iteratee: None = None) -> T: ... @t.overload def sum_by(collection: t.Iterable["SupportsAdd[int, T]"], iteratee: None = None) -> T: ... def sum_by(collection, iteratee=None): """ Sum each element in `collection`. If iteratee is passed, each element of `collection` is passed through an iteratee before the summation is computed. Args: collection: Collection to process or first number to add. 
iteratee: Iteratee applied per iteration or second number to add. Returns: Result of summation. Example: >>> sum_by([1, 2, 3, 4], lambda x: x**2) 30 .. versionadded:: 4.0.0 """ return sum(result[0] for result in iteriteratee(collection, iteratee)) @t.overload def mean(collection: t.Mapping[t.Any, "SupportsAdd[int, t.Any]"]) -> float: ... @t.overload def mean(collection: t.Iterable["SupportsAdd[int, t.Any]"]) -> float: ... def mean(collection): """ Calculate arithmetic mean of each element in `collection`. Args: collection: Collection to process. Returns: Result of mean. Example: >>> mean([1, 2, 3, 4]) 2.5 .. versionadded:: 2.1.0 .. versionchanged:: 4.0.0 - Removed ``average`` and ``avg`` aliases. - Moved iteratee functionality to :func:`mean_by`. """ return mean_by(collection) @t.overload def mean_by( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], "SupportsAdd[int, t.Any]"], ) -> float: ... @t.overload def mean_by( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], "SupportsAdd[int, t.Any]"] ) -> float: ... @t.overload def mean_by( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], "SupportsAdd[int, t.Any]"] ) -> float: ... @t.overload def mean_by( collection: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], "SupportsAdd[int, t.Any]"] ) -> float: ... @t.overload def mean_by( collection: t.Iterable[T], iteratee: t.Callable[[T, int], "SupportsAdd[int, t.Any]"] ) -> float: ... @t.overload def mean_by( collection: t.Iterable[T], iteratee: t.Callable[[T], "SupportsAdd[int, t.Any]"] ) -> float: ... @t.overload def mean_by( collection: t.Mapping[t.Any, "SupportsAdd[int, t.Any]"], iteratee: None = None ) -> float: ... @t.overload def mean_by(collection: t.Iterable["SupportsAdd[int, t.Any]"], iteratee: None = None) -> float: ... def mean_by(collection, iteratee=None): """ Calculate arithmetic mean of each element in `collection`. If iteratee is passed, each element of `collection` is passed through an iteratee before the mean is computed. Args: collection: Collection to process. iteratee: Iteratee applied per iteration. Returns: Result of mean. Example: >>> mean_by([1, 2, 3, 4], lambda x: x**2) 7.5 .. versionadded:: 4.0.0 """ return sum_by(collection, iteratee) / len(collection) def ceil(x: NumberT, precision: int = 0) -> float: """ Round number up to precision. Args: x: Number to round up. precision: Rounding precision. Defaults to ``0``. Returns: Number rounded up. Example: >>> ceil(3.275) == 4.0 True >>> ceil(3.215, 1) == 3.3 True >>> ceil(6.004, 2) == 6.01 True .. versionadded:: 3.3.0 """ return rounder(math.ceil, x, precision) NumT = t.TypeVar("NumT", int, float, "Decimal") NumT2 = t.TypeVar("NumT2", int, float, "Decimal") NumT3 = t.TypeVar("NumT3", int, float, "Decimal") def clamp(x: NumT, lower: NumT2, upper: t.Union[NumT3, None] = None) -> t.Union[NumT, NumT2, NumT3]: """ Clamps number within the inclusive lower and upper bounds. Args: x: Number to clamp. lower: Lower bound. upper: Upper bound Returns: number Example: >>> clamp(-10, -5, 5) -5 >>> clamp(10, -5, 5) 5 >>> clamp(10, 5) 5 >>> clamp(-10, 5) -10 .. versionadded:: 4.0.0 """ if upper is None: upper = lower # type: ignore lower = x # type: ignore if x < lower: x = lower # type: ignore elif x > upper: # type: ignore x = upper # type: ignore return x def divide(dividend: t.Union[NumberT, None], divisor: t.Union[NumberT, None]) -> float: """ Divide two numbers. Args: dividend: The first number in a division. divisor: The second number in a division. Returns: Returns the quotient. 
Example: >>> divide(20, 5) 4.0 >>> divide(1.5, 3) 0.5 >>> divide(None, None) 1.0 >>> divide(5, None) 5.0 .. versionadded:: 4.0.0 """ return call_math_operator(dividend, divisor, operator.truediv, 1) def floor(x: NumberT, precision: int = 0) -> float: """ Round number down to precision. Args: x: Number to round down. precision: Rounding precision. Defaults to ``0``. Returns: Number rounded down. Example: >>> floor(3.75) == 3.0 True >>> floor(3.215, 1) == 3.2 True >>> floor(0.046, 2) == 0.04 True .. versionadded:: 3.3.0 """ return rounder(math.floor, x, precision) @t.overload def max_( collection: t.Mapping[t.Any, "SupportsRichComparisonT"], default: Unset = UNSET ) -> "SupportsRichComparisonT": ... @t.overload def max_( collection: t.Mapping[t.Any, "SupportsRichComparisonT"], default: T ) -> t.Union["SupportsRichComparisonT", T]: ... @t.overload def max_( collection: t.Iterable["SupportsRichComparisonT"], default: Unset = UNSET ) -> "SupportsRichComparisonT": ... @t.overload def max_( collection: t.Iterable["SupportsRichComparisonT"], default: T ) -> t.Union["SupportsRichComparisonT", T]: ... def max_(collection, default=UNSET): """ Retrieves the maximum value of a `collection`. Args: collection: Collection to iterate over. default: Value to return if `collection` is empty. Returns: Maximum value. Example: >>> max_([1, 2, 3, 4]) 4 >>> max_([], default=-1) -1 .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Moved iteratee iteratee support to :func:`max_by`. """ return max_by(collection, default=default) @t.overload def max_by( collection: t.Mapping[t.Any, "SupportsRichComparisonT"], iteratee: None = None, default: Unset = UNSET, ) -> "SupportsRichComparisonT": ... @t.overload def max_by( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], "SupportsRichComparisonT"], default: Unset = UNSET, ) -> T2: ... @t.overload def max_by( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], "SupportsRichComparisonT"], *, default: T, ) -> t.Union[T2, T]: ... @t.overload def max_by( collection: t.Mapping[t.Any, "SupportsRichComparisonT"], iteratee: None = None, *, default: T ) -> t.Union["SupportsRichComparisonT", T]: ... @t.overload def max_by( collection: t.Iterable["SupportsRichComparisonT"], iteratee: None = None, default: Unset = UNSET ) -> "SupportsRichComparisonT": ... @t.overload def max_by( collection: t.Iterable[T2], iteratee: t.Callable[[T2], "SupportsRichComparisonT"], default: Unset = UNSET, ) -> T2: ... @t.overload def max_by( collection: t.Iterable[T2], iteratee: t.Callable[[T2], "SupportsRichComparisonT"], *, default: T ) -> t.Union[T2, T]: ... @t.overload def max_by( collection: t.Iterable["SupportsRichComparisonT"], iteratee: None = None, *, default: T ) -> t.Union["SupportsRichComparisonT", T]: ... @t.overload def max_by(collection: t.Iterable[T], iteratee: IterateeObjT, default: Unset = UNSET) -> T: ... @t.overload def max_by(collection: t.Iterable[T], iteratee: IterateeObjT, default: T2) -> t.Union[T, T2]: ... def max_by(collection, iteratee=None, default=UNSET): """ Retrieves the maximum value of a `collection`. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. default: Value to return if `collection` is empty. Returns: Maximum value. Example: >>> max_by([1.0, 1.5, 1.8], math.floor) 1.0 >>> max_by([{"a": 1}, {"a": 2}, {"a": 3}], "a") {'a': 3} >>> max_by([], default=-1) -1 .. 
versionadded:: 4.0.0 """ if isinstance(collection, dict): collection = collection.values() return max(iterator_with_default(collection, default), key=pyd.iteratee(iteratee)) @t.overload def median( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], NumberT] ) -> t.Union[float, int]: ... @t.overload def median( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], NumberT] ) -> t.Union[float, int]: ... @t.overload def median( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], NumberT] ) -> t.Union[float, int]: ... @t.overload def median( collection: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], NumberT] ) -> t.Union[float, int]: ... @t.overload def median( collection: t.Iterable[T], iteratee: t.Callable[[T, int], NumberT] ) -> t.Union[float, int]: ... @t.overload def median( collection: t.Iterable[T], iteratee: t.Callable[[T], NumberT] ) -> t.Union[float, int]: ... @t.overload def median(collection: t.Iterable[NumberT], iteratee: None = None) -> t.Union[float, int]: ... def median(collection, iteratee=None): """ Calculate median of each element in `collection`. If iteratee is passed, each element of `collection` is passed through an iteratee before the median is computed. Args: collection: Collection to process. iteratee: Iteratee applied per iteration. Returns: Result of median. Example: >>> median([1, 2, 3, 4, 5]) 3 >>> median([1, 2, 3, 4]) 2.5 .. versionadded:: 2.1.0 """ length = len(collection) middle = (length + 1) / 2 collection = sorted(ret[0] for ret in iteriteratee(collection, iteratee)) if pyd.is_odd(length): result = collection[int(middle - 1)] else: left = int(middle - 1.5) right = int(middle - 0.5) result = (collection[left] + collection[right]) / 2 return result @t.overload def min_( collection: t.Mapping[t.Any, "SupportsRichComparisonT"], default: Unset = UNSET ) -> "SupportsRichComparisonT": ... @t.overload def min_( collection: t.Mapping[t.Any, "SupportsRichComparisonT"], default: T ) -> t.Union["SupportsRichComparisonT", T]: ... @t.overload def min_( collection: t.Iterable["SupportsRichComparisonT"], default: Unset = UNSET ) -> "SupportsRichComparisonT": ... @t.overload def min_( collection: t.Iterable["SupportsRichComparisonT"], default: T ) -> t.Union["SupportsRichComparisonT", T]: ... def min_(collection, default=UNSET): """ Retrieves the minimum value of a `collection`. Args: collection: Collection to iterate over. default: Value to return if `collection` is empty. Returns: Minimum value. Example: >>> min_([1, 2, 3, 4]) 1 >>> min_([], default=100) 100 .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Moved iteratee iteratee support to :func:`min_by`. """ return min_by(collection, default=default) @t.overload def min_by( collection: t.Mapping[t.Any, "SupportsRichComparisonT"], iteratee: None = None, default: Unset = UNSET, ) -> "SupportsRichComparisonT": ... @t.overload def min_by( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], "SupportsRichComparisonT"], default: Unset = UNSET, ) -> T2: ... @t.overload def min_by( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], "SupportsRichComparisonT"], *, default: T, ) -> t.Union[T2, T]: ... @t.overload def min_by( collection: t.Mapping[t.Any, "SupportsRichComparisonT"], iteratee: None = None, *, default: T ) -> t.Union["SupportsRichComparisonT", T]: ... @t.overload def min_by( collection: t.Iterable["SupportsRichComparisonT"], iteratee: None = None, default: Unset = UNSET ) -> "SupportsRichComparisonT": ... 
@t.overload def min_by( collection: t.Iterable[T2], iteratee: t.Callable[[T2], "SupportsRichComparisonT"], default: Unset = UNSET, ) -> T2: ... @t.overload def min_by( collection: t.Iterable[T2], iteratee: t.Callable[[T2], "SupportsRichComparisonT"], *, default: T ) -> t.Union[T2, T]: ... @t.overload def min_by( collection: t.Iterable["SupportsRichComparisonT"], iteratee: None = None, *, default: T ) -> t.Union["SupportsRichComparisonT", T]: ... @t.overload def min_by(collection: t.Iterable[T], iteratee: IterateeObjT, default: Unset = UNSET) -> T: ... @t.overload def min_by(collection: t.Iterable[T], iteratee: IterateeObjT, default: T2) -> t.Union[T, T2]: ... def min_by(collection, iteratee=None, default=UNSET): """ Retrieves the minimum value of a `collection`. Args: collection: Collection to iterate over. iteratee: Iteratee applied per iteration. default: Value to return if `collection` is empty. Returns: Minimum value. Example: >>> min_by([1.8, 1.5, 1.0], math.floor) 1.8 >>> min_by([{"a": 1}, {"a": 2}, {"a": 3}], "a") {'a': 1} >>> min_by([], default=100) 100 .. versionadded:: 4.0.0 """ if isinstance(collection, dict): collection = collection.values() return min(iterator_with_default(collection, default), key=pyd.iteratee(iteratee)) def moving_mean(array: t.Sequence["SupportsAdd[int, t.Any]"], size: t.SupportsInt) -> t.List[float]: """ Calculate moving mean of each element of `array`. Args: array: List to process. size: Window size. Returns: Result of moving average. Example: >>> moving_mean(range(10), 1) [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] >>> moving_mean(range(10), 5) [2.0, 3.0, 4.0, 5.0, 6.0, 7.0] >>> moving_mean(range(10), 10) [4.5] .. versionadded:: 2.1.0 .. versionchanged:: 4.0.0 Rename to ``moving_mean`` and remove ``moving_average`` and ``moving_avg`` aliases. """ result = [] size = int(size) for i in range(size - 1, len(array) + 1): window = array[i - size : i] if len(window) == size: result.append(mean(window)) return result @t.overload def multiply(multiplier: SupportsMul[int, T2], multiplicand: None) -> T2: ... @t.overload def multiply(multiplier: None, multiplicand: SupportsMul[int, T2]) -> T2: ... @t.overload def multiply(multiplier: None, multiplicand: None) -> int: ... @t.overload def multiply(multiplier: SupportsMul[T, T2], multiplicand: T) -> T2: ... @t.overload def multiply(multiplier: T, multiplicand: SupportsMul[T, T2]) -> T2: ... def multiply(multiplier, multiplicand): """ Multiply two numbers. Args: multiplier: The first number in a multiplication. multiplicand: The second number in a multiplication. Returns: Returns the product. Example: >>> multiply(4, 5) 20 >>> multiply(10, 4) 40 >>> multiply(None, 10) 10 >>> multiply(None, None) 1 .. versionadded:: 4.0.0 """ return call_math_operator(multiplier, multiplicand, operator.mul, 1) @t.overload def power(x: int, n: int) -> t.Union[int, float]: ... @t.overload def power(x: float, n: t.Union[int, float]) -> float: ... @t.overload def power(x: t.List[int], n: int) -> t.List[t.Union[int, float]]: ... @t.overload def power(x: t.List[float], n: t.List[t.Union[int, float]]) -> t.List[float]: ... def power(x, n): """ Calculate exponentiation of `x` raised to the `n` power. Args: x: Base number. n: Exponent. Returns: Result of calculation. Example: >>> power(5, 2) 25 >>> power(12.5, 3) 1953.125 .. versionadded:: 2.1.0 .. versionchanged:: 4.0.0 Removed alias ``pow_``. 
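
    Example (a minimal illustrative sketch of the list overloads above):

        >>> power([2, 3, 4], 2)
        [4, 9, 16]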
""" if pyd.is_number(x): result = pow(x, n) elif pyd.is_list(x): result = [pow(item, n) for item in x] else: result = None return result @t.overload def round_(x: t.List[SupportsRound[NumberT]], precision: int = 0) -> t.List[float]: ... @t.overload def round_(x: SupportsRound[NumberT], precision: int = 0) -> float: ... def round_(x, precision=0): """ Round number to precision. Args: x: Number to round. precision: Rounding precision. Defaults to ``0``. Returns: Rounded number. Example: >>> round_(3.275) == 3.0 True >>> round_(3.275, 1) == 3.3 True .. versionadded:: 2.1.0 .. versionchanged:: 4.0.0 Remove alias ``curve``. """ return rounder(round, x, precision) @t.overload def scale(array: t.Iterable["Decimal"], maximum: "Decimal") -> t.List["Decimal"]: ... @t.overload def scale(array: t.Iterable[NumberNoDecimalT], maximum: NumberNoDecimalT) -> t.List[float]: ... @t.overload def scale(array: t.Iterable[NumberT], maximum: int = 1) -> t.List[float]: ... def scale(array, maximum: NumberT = 1): """ Scale list of value to a maximum number. Args: array: Numbers to scale. maximum: Maximum scale value. Returns: Scaled numbers. Example: >>> scale([1, 2, 3, 4]) [0.25, 0.5, 0.75, 1.0] >>> scale([1, 2, 3, 4], 1) [0.25, 0.5, 0.75, 1.0] >>> scale([1, 2, 3, 4], 4) [1.0, 2.0, 3.0, 4.0] >>> scale([1, 2, 3, 4], 2) [0.5, 1.0, 1.5, 2.0] .. versionadded:: 2.1.0 """ array_max = max(array) factor = maximum / array_max return [item * factor for item in array] @t.overload def slope( point1: t.Union[t.Tuple["Decimal", "Decimal"], t.List["Decimal"]], point2: t.Union[t.Tuple["Decimal", "Decimal"], t.List["Decimal"]], ) -> "Decimal": ... @t.overload def slope( point1: t.Union[t.Tuple[NumberNoDecimalT, NumberNoDecimalT], t.List[NumberNoDecimalT]], point2: t.Union[t.Tuple[NumberNoDecimalT, NumberNoDecimalT], t.List[NumberNoDecimalT]], ) -> float: ... def slope(point1, point2): """ Calculate the slope between two points. Args: point1: X and Y coordinates of first point. point2: X and Y cooredinates of second point. Returns: Calculated slope. Example: >>> slope((1, 2), (4, 8)) 2.0 .. versionadded:: 2.1.0 """ x1, y1 = point1[0], point1[1] x2, y2 = point2[0], point2[1] if x1 == x2: result = INFINITY else: result = (y2 - y1) / (x2 - x1) return result def std_deviation(array: t.List[NumberT]) -> float: """ Calculate standard deviation of list of numbers. Args: array: List to process. Returns: Calculated standard deviation. Example: >>> round(std_deviation([1, 18, 20, 4]), 2) == 8.35 True .. versionadded:: 2.1.0 .. versionchanged:: 4.0.0 Remove alias ``sigma``. """ return math.sqrt(variance(array)) @t.overload def subtract(minuend: "SupportsSub[T, T2]", subtrahend: T) -> T2: ... @t.overload def subtract(minuend: T, subtrahend: "SupportsSub[T, T2]") -> T2: ... def subtract(minuend, subtrahend): """ Subtracts two numbers. Args: minuend: Value passed in by the user. subtrahend: Value passed in by the user. Returns: Result of the difference from the given values. Example: >>> subtract(10, 5) 5 >>> subtract(-10, 4) -14 >>> subtract(2, 0.5) 1.5 .. versionadded:: 4.0.0 """ return call_math_operator(minuend, subtrahend, operator.sub, 0) def transpose(array: t.Iterable[t.Iterable[T]]) -> t.List[t.List[T]]: """ Transpose the elements of `array`. Args: array: List to process. Returns: Transposed list. Example: >>> transpose([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) [[1, 4, 7], [2, 5, 8], [3, 6, 9]] .. 
versionadded:: 2.1.0 """ trans: t.List[t.List[T]] = [] for y, row in iterator(array): for x, col in iterator(row): trans = pyd.set_(trans, [x, y], col) return trans @t.overload def variance(array: t.Mapping[t.Any, "SupportsAdd[int, t.Any]"]) -> float: ... @t.overload def variance(array: t.Iterable["SupportsAdd[int, t.Any]"]) -> float: ... def variance(array): """ Calculate the variance of the elements in `array`. Args: array: List to process. Returns: Calculated variance. Example: >>> variance([1, 18, 20, 4]) 69.6875 .. versionadded:: 2.1.0 """ avg = mean(array) def var(x): return power(x - avg, 2) return pyd._(array).map_(var).mean().value() @t.overload def zscore( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], NumberT] ) -> t.List[float]: ... @t.overload def zscore( collection: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], NumberT] ) -> t.List[float]: ... @t.overload def zscore( collection: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], NumberT] ) -> t.List[float]: ... @t.overload def zscore( collection: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], NumberT] ) -> t.List[float]: ... @t.overload def zscore(collection: t.Iterable[T], iteratee: t.Callable[[T, int], NumberT]) -> t.List[float]: ... @t.overload def zscore(collection: t.Iterable[T], iteratee: t.Callable[[T], NumberT]) -> t.List[float]: ... @t.overload def zscore(collection: t.Iterable[NumberT], iteratee: None = None) -> t.List[float]: ... def zscore(collection, iteratee=None): """ Calculate the standard score assuming normal distribution. If iteratee is passed, each element of `collection` is passed through an iteratee before the standard score is computed. Args: collection: Collection to process. iteratee: Iteratee applied per iteration. Returns: Calculated standard score. Example: >>> results = zscore([1, 2, 3]) # [-1.224744871391589, 0.0, 1.224744871391589] .. versionadded:: 2.1.0 """ array = pyd.map_(collection, iteratee) avg = mean(array) sig = std_deviation(array) return [(item - avg) / sig for item in array] # # Utility methods not a part of the main API # def call_math_operator(value1, value2, op, default): """Return the result of the math operation on the given values.""" if value1 is None: value1 = default if value2 is None: value2 = default if not pyd.is_number(value1): try: value1 = float(value1) except Exception: pass if not pyd.is_number(value2): try: value2 = float(value2) except Exception: pass return op(value1, value2) def rounder(func, x, precision): precision = pow(10, precision) def rounder_func(item): return func(item * precision) / precision result = None if pyd.is_number(x): result = rounder_func(x) elif pyd.is_iterable(x): try: result = [rounder_func(item) for item in x] except TypeError: pass return result pydash-8.0.3/src/pydash/objects.py000066400000000000000000002006231464745015500171120ustar00rootroot00000000000000""" Functions that operate on lists, dicts, and other objects. .. 
versionadded:: 1.0.0 """ from __future__ import annotations import copy from functools import partial import math import re import typing as t import pydash as pyd from .helpers import UNSET, Unset, base_get, base_set, callit, getargcount, iterator, iteriteratee from .types import IterateeObjT, PathT from .utilities import PathToken, to_path, to_path_tokens if t.TYPE_CHECKING: from _typeshed import SupportsRichComparisonT # pragma: no cover __all__ = ( "apply", "apply_catch", "apply_if", "apply_if_not_none", "assign", "assign_with", "callables", "clone", "clone_deep", "clone_deep_with", "clone_with", "defaults", "defaults_deep", "find_key", "find_last_key", "for_in", "for_in_right", "get", "has", "invert", "invert_by", "invoke", "keys", "map_keys", "map_values", "map_values_deep", "merge", "merge_with", "omit", "omit_by", "parse_int", "pick", "pick_by", "rename_keys", "set_", "set_with", "to_boolean", "to_dict", "to_integer", "to_list", "to_number", "to_pairs", "to_string", "transform", "unset", "update", "update_with", "values", ) T = t.TypeVar("T") T2 = t.TypeVar("T2") T3 = t.TypeVar("T3") T4 = t.TypeVar("T4") T5 = t.TypeVar("T5") @t.overload def assign( obj: t.Mapping[T, T2], *sources: t.Mapping[T3, T4] ) -> t.Dict[t.Union[T, T3], t.Union[T2, T4]]: ... @t.overload def assign( obj: t.Union[t.Tuple[T, ...], t.List[T]], *sources: t.Mapping[int, T2] ) -> t.List[t.Union[T, T2]]: ... def assign(obj, *sources) -> t.Union[t.List[t.Any], t.Dict[t.Any, t.Any]]: """ Assigns properties of source object(s) to the destination object. Args: obj: Destination object whose properties will be modified. sources: Source objects to assign to `obj`. Returns: Modified `obj`. Warning: `obj` is modified in place. Example: >>> obj = {} >>> obj2 = assign(obj, {"a": 1}, {"b": 2}, {"c": 3}) >>> obj == {"a": 1, "b": 2, "c": 3} True >>> obj is obj2 True .. versionadded:: 1.0.0 .. versionchanged:: 2.3.2 Apply :func:`clone_deep` to each `source` before assigning to `obj`. .. versionchanged:: 3.0.0 Allow iteratees to accept partial arguments. .. versionchanged:: 3.4.4 Shallow copy each `source` instead of deep copying. .. versionchanged:: 4.0.0 - Moved `iteratee` argument to :func:`assign_with`. - Removed alias ``extend``. """ return assign_with(obj, *sources) # type: ignore @t.overload def assign_with( obj: t.Mapping[T, T2], *sources: t.Mapping[T3, t.Any], customizer: t.Callable[[t.Union[T2, None]], T5], ) -> t.Dict[t.Union[T, T3], t.Union[T2, T5]]: ... @t.overload def assign_with( obj: t.Mapping[T, T2], *sources: t.Mapping[T3, T4], customizer: t.Callable[[t.Union[T2, None], T4], T5], ) -> t.Dict[t.Union[T, T3], t.Union[T2, T5]]: ... @t.overload def assign_with( obj: t.Mapping[T, T2], *sources: t.Mapping[T3, T4], customizer: t.Callable[[t.Union[T2, None], T4, T3], T5], ) -> t.Dict[t.Union[T, T3], t.Union[T2, T5]]: ... @t.overload def assign_with( obj: t.Mapping[T, T2], *sources: t.Mapping[T3, T4], customizer: t.Callable[[t.Union[T2, None], T4, T3, t.Dict[T, T2]], T5], ) -> t.Dict[t.Union[T, T3], t.Union[T2, T5]]: ... @t.overload def assign_with( obj: t.Mapping[T, T2], *sources: t.Mapping[T3, T4], customizer: t.Callable[[t.Union[T2, None], T4, T3, t.Dict[T, T2], t.Dict[T3, T4]], T5], ) -> t.Dict[t.Union[T, T3], t.Union[T2, T5]]: ... @t.overload def assign_with( obj: t.Mapping[T, T2], *sources: t.Mapping[T3, T4], customizer: None = None ) -> t.Dict[t.Union[T, T3], t.Union[T2, T4]]: ... 
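# Illustrative usage sketch (not part of the upstream module): the customizer may also be
# passed as a keyword argument and is invoked with up to five arguments
# ``(obj_value, src_value, key, obj, source)``; a non-None return value is what gets assigned.
#
#     assign_with({"a": 1}, {"a": 10, "b": 2}, customizer=lambda o, s: o if o is not None else s)
#     # -> {"a": 1, "b": 2}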
def assign_with(obj, *sources, customizer=None): """ This method is like :func:`assign` except that it accepts customizer which is invoked to produce the assigned values. If customizer returns ``None``, assignment is handled by the method instead. The customizer is invoked with five arguments: ``(obj_value, src_value, key, obj, source)``. Args: obj: Destination object whose properties will be modified. sources: Source objects to assign to `obj`. Keyword Args: customizer: Customizer applied per iteration. Returns: Modified `obj`. Warning: `obj` is modified in place. Example: >>> customizer = lambda o, s: s if o is None else o >>> results = assign_with({"a": 1}, {"b": 2}, {"a": 3}, customizer) >>> results == {"a": 1, "b": 2} True .. versionadded:: 4.0.0 """ sources = list(sources) if customizer is None and callable(sources[-1]): customizer = sources.pop() if customizer is not None: argcount = getargcount(customizer, maxargs=5) else: argcount = None for source in sources: source = source.copy() for key, value in source.items(): if customizer: val = callit(customizer, obj.get(key), value, key, obj, source, argcount=argcount) if val is not None: value = val obj[key] = value return obj @t.overload def callables( obj: t.Mapping["SupportsRichComparisonT", t.Any], ) -> t.List["SupportsRichComparisonT"]: ... @t.overload def callables(obj: t.Iterable[T]) -> t.List[T]: ... def callables(obj): """ Creates a sorted list of keys of an object that are callable. Args: obj: Object to inspect. Returns: All keys whose values are callable. Example: >>> callables({"a": 1, "b": lambda: 2, "c": lambda: 3}) ['b', 'c'] .. versionadded:: 1.0.0 .. versionchanged:: 2.0.0 Renamed ``functions`` to ``callables``. .. versionchanged:: 4.0.0 Removed alias ``methods``. """ return sorted(key for key, value in iterator(obj) if callable(value)) def clone(value: T) -> T: """ Creates a clone of `value`. Args: value: Object to clone. Example: >>> x = {"a": 1, "b": 2, "c": {"d": 3}} >>> y = clone(x) >>> y == y True >>> x is y False >>> x["c"] is y["c"] True Returns: Cloned object. .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Moved 'iteratee' parameter to :func:`clone_with`. """ return base_clone(value) @t.overload def clone_with( value: t.Mapping[T, T2], customizer: t.Callable[[T2, T, t.Mapping[T, T2]], T3] ) -> t.Dict[T, t.Union[T2, T3]]: ... @t.overload def clone_with( value: t.Mapping[T, T2], customizer: t.Callable[[T2, T], T3] ) -> t.Dict[T, t.Union[T2, T3]]: ... @t.overload def clone_with( value: t.Mapping[T, T2], customizer: t.Callable[[T2], T3] ) -> t.Dict[T, t.Union[T2, T3]]: ... @t.overload def clone_with( value: t.List[T], customizer: t.Callable[[T, int, t.List[T]], T2] ) -> t.List[t.Union[T, T2]]: ... @t.overload def clone_with( value: t.List[T], customizer: t.Callable[[T, int], T2] ) -> t.List[t.Union[T, T2]]: ... @t.overload def clone_with(value: t.List[T], customizer: t.Callable[[T], T2]) -> t.List[t.Union[T, T2]]: ... @t.overload def clone_with(value: T, customizer: None = None) -> T: ... @t.overload def clone_with(value: t.Any, customizer: t.Callable[..., t.Any]) -> t.Any: ... def clone_with(value, customizer=None): """ This method is like :func:`clone` except that it accepts customizer which is invoked to produce the cloned value. If customizer returns ``None``, cloning is handled by the method instead. The customizer is invoked with up to three arguments: ``(value, index|key, object)``. Args: value: Object to clone. customizer: Function to customize cloning. Returns: Cloned object. 
Example: >>> x = {"a": 1, "b": 2, "c": {"d": 3}} >>> cbk = lambda v, k: v + 2 if isinstance(v, int) and k else None >>> y = clone_with(x, cbk) >>> y == {"a": 3, "b": 4, "c": {"d": 3}} True """ return base_clone(value, customizer=customizer) def clone_deep(value: T) -> T: """ Creates a deep clone of `value`. If an iteratee is provided it will be executed to produce the cloned values. Args: value: Object to clone. Returns: Cloned object. Example: >>> x = {"a": 1, "b": 2, "c": {"d": 3}} >>> y = clone_deep(x) >>> y == y True >>> x is y False >>> x["c"] is y["c"] False .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Moved 'iteratee' parameter to :func:`clone_deep_with`. """ return base_clone(value, is_deep=True) @t.overload def clone_deep_with( value: t.Mapping[T, T2], customizer: t.Callable[[T2, T, t.Mapping[T, T2]], T3] ) -> t.Dict[T, t.Union[T2, T3]]: ... @t.overload def clone_deep_with( value: t.Mapping[T, T2], customizer: t.Callable[[T2, T], T3] ) -> t.Dict[T, t.Union[T2, T3]]: ... @t.overload def clone_deep_with( value: t.Mapping[T, T2], customizer: t.Callable[[T2], T3] ) -> t.Dict[T, t.Union[T2, T3]]: ... @t.overload def clone_deep_with( value: t.List[T], customizer: t.Callable[[T, int, t.List[T]], T2] ) -> t.List[t.Union[T, T2]]: ... @t.overload def clone_deep_with( value: t.List[T], customizer: t.Callable[[T, int], T2] ) -> t.List[t.Union[T, T2]]: ... @t.overload def clone_deep_with( value: t.List[T], customizer: t.Callable[[T], T2] ) -> t.List[t.Union[T, T2]]: ... @t.overload def clone_deep_with(value: T, customizer: None = None) -> T: ... @t.overload def clone_deep_with(value: t.Any, customizer: t.Callable[..., t.Any]) -> t.Any: ... def clone_deep_with(value, customizer=None): """ This method is like :func:`clone_with` except that it recursively clones `value`. Args: value: Object to clone. customizer: Function to customize cloning. Returns: Cloned object. """ return base_clone(value, is_deep=True, customizer=customizer) def defaults( obj: t.Dict[T, T2], *sources: t.Dict[T3, T4] ) -> t.Dict[t.Union[T, T3], t.Union[T2, T4]]: """ Assigns properties of source object(s) to the destination object for all destination properties that resolve to undefined. Args: obj: Destination object whose properties will be modified. sources: Source objects to assign to `obj`. Returns: Modified `obj`. Warning: `obj` is modified in place. Example: >>> obj = {"a": 1} >>> obj2 = defaults(obj, {"b": 2}, {"c": 3}, {"a": 4}) >>> obj is obj2 True >>> obj == {"a": 1, "b": 2, "c": 3} True .. versionadded:: 1.0.0 """ for source in sources: for key, value in source.items(): obj.setdefault(key, value) # type: ignore return obj # type: ignore def defaults_deep( obj: t.Dict[T, T2], *sources: t.Dict[T3, T4] ) -> t.Dict[t.Union[T, T3], t.Union[T2, T4]]: """ This method is like :func:`defaults` except that it recursively assigns default properties. Args: obj: Destination object whose properties will be modified. sources: Source objects to assign to `obj`. Returns: Modified `obj`. Warning: `obj` is modified in place. Example: >>> obj = {"a": {"b": 1}} >>> obj2 = defaults_deep(obj, {"a": {"b": 2, "c": 3}}) >>> obj is obj2 True >>> obj == {"a": {"b": 1, "c": 3}} True .. versionadded:: 3.3.0 """ def setter(obj, key, value): if hasattr(obj, "setdefault"): obj.setdefault(key, value) return merge_with(obj, *sources, _setter=setter) @t.overload def find_key( obj: t.Mapping[T, T2], predicate: t.Callable[[T2, T, t.Dict[T, T2]], t.Any] ) -> t.Union[T, None]: ... 
@t.overload def find_key(obj: t.Mapping[T, T2], predicate: t.Callable[[T2, T], t.Any]) -> t.Union[T, None]: ... @t.overload def find_key(obj: t.Mapping[T, T2], predicate: t.Callable[[T2], t.Any]) -> t.Union[T, None]: ... @t.overload def find_key(obj: t.Mapping[T, t.Any], predicate: None = None) -> t.Union[T, None]: ... @t.overload def find_key( collection: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], t.Any] ) -> t.Union[int, None]: ... @t.overload def find_key( collection: t.Iterable[T], iteratee: t.Callable[[T, int], t.Any] ) -> t.Union[int, None]: ... @t.overload def find_key(collection: t.Iterable[T], iteratee: t.Callable[[T], t.Any]) -> t.Union[int, None]: ... @t.overload def find_key(collection: t.Iterable[t.Any], iteratee: None = None) -> t.Union[int, None]: ... def find_key(obj, predicate=None): """ This method is like :func:`pydash.arrays.find_index` except that it returns the key of the first element that passes the predicate check, instead of the element itself. Args: obj: Object to search. predicate: Predicate applied per iteration. Returns: Found key or ``None``. Example: >>> find_key({"a": 1, "b": 2, "c": 3}, lambda x: x == 1) 'a' >>> find_key([1, 2, 3, 4], lambda x: x == 1) 0 .. versionadded:: 1.0.0 """ for result, _, key, _ in iteriteratee(obj, predicate): if result: return key @t.overload def find_last_key( obj: t.Mapping[T, T2], predicate: t.Callable[[T2, T, t.Dict[T, T2]], t.Any] ) -> t.Union[T, None]: ... @t.overload def find_last_key( obj: t.Mapping[T, T2], predicate: t.Callable[[T2, T], t.Any] ) -> t.Union[T, None]: ... @t.overload def find_last_key( obj: t.Mapping[T, T2], predicate: t.Callable[[T2], t.Any] ) -> t.Union[T, None]: ... @t.overload def find_last_key(obj: t.Mapping[T, t.Any], predicate: None = None) -> t.Union[T, None]: ... @t.overload def find_last_key( collection: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], t.Any] ) -> t.Union[int, None]: ... @t.overload def find_last_key( collection: t.Iterable[T], iteratee: t.Callable[[T, int], t.Any] ) -> t.Union[int, None]: ... @t.overload def find_last_key( collection: t.Iterable[T], iteratee: t.Callable[[T], t.Any] ) -> t.Union[int, None]: ... @t.overload def find_last_key(collection: t.Iterable[t.Any], iteratee: None = None) -> t.Union[int, None]: ... def find_last_key(obj, predicate=None): """ This method is like :func:`find_key` except that it iterates over elements of a collection in the opposite order. Args: obj: Object to search. predicate: Predicate applied per iteration. Returns: Found key or ``None``. Example: >>> find_last_key({"a": 1, "b": 2, "c": 3}, lambda x: x == 1) 'a' >>> find_last_key([1, 2, 3, 1], lambda x: x == 1) 3 .. versionchanged:: 4.0.0 Made into its own function (instead of an alias of ``find_key``) with proper reverse find implementation. """ reversed_obj = reversed(list(iteriteratee(obj, predicate))) for result, _, key, _ in reversed_obj: if result: return key @t.overload def for_in( obj: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], t.Any] ) -> t.Dict[T, T2]: ... @t.overload def for_in(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], t.Any]) -> t.Dict[T, T2]: ... @t.overload def for_in(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2], t.Any]) -> t.Dict[T, T2]: ... @t.overload def for_in(obj: t.Mapping[T, T2], iteratee: None = None) -> t.Dict[T, T2]: ... @t.overload def for_in(obj: t.Sequence[T], iteratee: t.Callable[[T, int, t.List[T]], t.Any]) -> t.List[T]: ... 
@t.overload def for_in(obj: t.Sequence[T], iteratee: t.Callable[[T, int], t.Any]) -> t.List[T]: ... @t.overload def for_in(obj: t.Sequence[T], iteratee: t.Callable[[T], t.Any]) -> t.List[T]: ... @t.overload def for_in(obj: t.Sequence[T], iteratee: None = None) -> t.List[T]: ... def for_in(obj, iteratee=None): """ Iterates over own and inherited enumerable properties of `obj`, executing `iteratee` for each property. Args: obj: Object to process. iteratee: Iteratee applied per iteration. Returns: `obj`. Example: >>> obj = {} >>> def cb(v, k): ... obj[k] = v >>> results = for_in({"a": 1, "b": 2, "c": 3}, cb) >>> results == {"a": 1, "b": 2, "c": 3} True >>> obj == {"a": 1, "b": 2, "c": 3} True .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed alias ``for_own``. """ walk = (None for ret, _, _, _ in iteriteratee(obj, iteratee) if ret is False) next(walk, None) return obj @t.overload def for_in_right( obj: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], t.Any] ) -> t.Dict[T, T2]: ... @t.overload def for_in_right(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], t.Any]) -> t.Dict[T, T2]: ... @t.overload def for_in_right(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2], t.Any]) -> t.Dict[T, T2]: ... @t.overload def for_in_right(obj: t.Mapping[T, T2], iteratee: None = None) -> t.Dict[T, T2]: ... @t.overload def for_in_right( obj: t.Sequence[T], iteratee: t.Callable[[T, int, t.List[T]], t.Any] ) -> t.List[T]: ... @t.overload def for_in_right(obj: t.Sequence[T], iteratee: t.Callable[[T, int], t.Any]) -> t.List[T]: ... @t.overload def for_in_right(obj: t.Sequence[T], iteratee: t.Callable[[T], t.Any]) -> t.List[T]: ... @t.overload def for_in_right(obj: t.Sequence[T], iteratee: None = None) -> t.List[T]: ... def for_in_right(obj, iteratee=None): """ This function is like :func:`for_in` except it iterates over the properties in reverse order. Args: obj: Object to process. iteratee: Iteratee applied per iteration. Returns: `obj`. Example: >>> data = {"product": 1} >>> def cb(v): ... data["product"] *= v >>> for_in_right([1, 2, 3, 4], cb) [1, 2, 3, 4] >>> data["product"] == 24 True .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed alias ``for_own_right``. """ walk = (None for ret, _, _, _ in iteriteratee(obj, iteratee, reverse=True) if ret is False) next(walk, None) return obj @t.overload def get(obj: t.List[T], path: int, default: T2) -> t.Union[T, T2]: ... @t.overload def get(obj: t.List[T], path: int, default: None = None) -> t.Union[T, None]: ... @t.overload def get(obj: t.Any, path: PathT, default: t.Any = None) -> t.Any: ... def get(obj: t.Any, path: PathT, default: t.Any = None) -> t.Any: """ Get the value at any depth of a nested object based on the path described by `path`. If path doesn't exist, `default` is returned. Args: obj: Object to process. path: List or ``.`` delimited string of path describing path. default: Default value to return if path doesn't exist. Defaults to ``None``. Returns: Value of `obj` at path. Example: >>> get({}, "a.b.c") is None True >>> get({"a": {"b": {"c": [1, 2, 3, 4]}}}, "a.b.c[1]") 2 >>> get({"a": {"b": {"c": [1, 2, 3, 4]}}}, "a.b.c.1") 2 >>> get({"a": {"b": [0, {"c": [1, 2]}]}}, "a.b.1.c.1") 2 >>> get({"a": {"b": [0, {"c": [1, 2]}]}}, ["a", "b", 1, "c", 1]) 2 >>> get({"a": {"b": [0, {"c": [1, 2]}]}}, "a.b.1.c.2") is None True .. versionadded:: 2.0.0 .. versionchanged:: 2.2.0 Support escaping "." delimiter in single string path key. .. versionchanged:: 3.3.0 - Added :func:`get` as main definition and :func:`get_path` as alias. 
- Made :func:`deep_get` an alias. .. versionchanged:: 3.4.7 Fixed bug where an iterable default was iterated over instead of being returned when an object path wasn't found. .. versionchanged:: 4.0.0 - Support attribute access on `obj` if item access fails. - Removed aliases ``get_path`` and ``deep_get``. .. versionchanged:: 4.7.6 Fixed bug where getattr is used on Mappings and Sequence in Python 3.5+ """ if default is UNSET: # When NoValue given for default, then this method will raise if path is not present in obj. sentinel = default else: # When a returnable default is given, use a sentinel value to detect when base_get() returns # a default value for a missing path, so we can exit early from the loop and not mistakenly # iterate over the default. sentinel = object() for key in to_path(path): obj = base_get(obj, key, default=sentinel) if obj is sentinel: # Path doesn't exist so set return obj to the default. obj = default break return obj def has(obj: t.Any, path: PathT) -> bool: """ Checks if `path` exists as a key of `obj`. Args: obj: Object to test. path: Path to test for. Can be a list of nested keys or a ``.`` delimited string of path describing the path. Returns: Whether `obj` has `path`. Example: >>> has([1, 2, 3], 1) True >>> has({"a": 1, "b": 2}, "b") True >>> has({"a": 1, "b": 2}, "c") False >>> has({"a": {"b": [0, {"c": [1, 2]}]}}, "a.b.1.c.1") True >>> has({"a": {"b": [0, {"c": [1, 2]}]}}, "a.b.1.c.2") False .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0 Return ``False`` on ``ValueError`` when checking path. .. versionchanged:: 3.3.0 - Added :func:`deep_has` as alias. - Added :func:`has_path` as alias. .. versionchanged:: 4.0.0 Removed aliases ``deep_has`` and ``has_path``. """ try: get(obj, path, default=UNSET) exists = True except (KeyError, IndexError, TypeError, ValueError): exists = False return exists @t.overload def invert(obj: t.Mapping[T, T2]) -> t.Dict[T2, T]: ... @t.overload def invert(obj: t.Union[t.Iterator[T], t.Sequence[T]]) -> t.Dict[T, int]: ... def invert(obj): """ Creates an object composed of the inverted keys and values of the given object. Args: obj: Dict to invert. Returns: Inverted dict. Example: >>> results = invert({"a": 1, "b": 2, "c": 3}) >>> results == {1: "a", 2: "b", 3: "c"} True Note: Assumes `obj` values are hashable as ``dict`` keys. .. versionadded:: 1.0.0 .. versionchanged:: 2.0.0 Added ``multivalue`` argument. .. versionchanged:: 4.0.0 Moved ``multivalue=True`` functionality to :func:`invert_by`. """ return {value: key for key, value in iterator(obj)} @t.overload def invert_by(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2], T3]) -> t.Dict[T3, t.List[T]]: ... @t.overload def invert_by(obj: t.Mapping[T, T2], iteratee: None = None) -> t.Dict[T2, t.List[T]]: ... @t.overload def invert_by( obj: t.Union[t.Iterator[T], t.Sequence[T]], iteratee: t.Callable[[T], T2] ) -> t.Dict[T2, t.List[int]]: ... @t.overload def invert_by( obj: t.Union[t.Iterator[T], t.Sequence[T]], iteratee: None = None ) -> t.Dict[T, t.List[int]]: ... def invert_by(obj, iteratee=None): """ This method is like :func:`invert` except that the inverted object is generated from the results of running each element of object through iteratee. The corresponding inverted value of each inverted key is a list of keys responsible for generating the inverted value. The iteratee is invoked with one argument: ``(value)``. Args: obj: Object to invert. iteratee: Iteratee applied per iteration. Returns: Inverted dict. 
Example: >>> obj = {"a": 1, "b": 2, "c": 1} >>> results = invert_by(obj) # {1: ['a', 'c'], 2: ['b']} >>> set(results[1]) == set(["a", "c"]) True >>> set(results[2]) == set(["b"]) True >>> results2 = invert_by(obj, lambda value: "group" + str(value)) >>> results2["group1"] == results[1] True >>> results2["group2"] == results[2] True Note: Assumes `obj` values are hashable as ``dict`` keys. .. versionadded:: 4.0.0 """ callback = pyd.iteratee(iteratee) result = {} for key, value in iterator(obj): result.setdefault(callback(value), []).append(key) return result def invoke(obj: t.Any, path: PathT, *args: t.Any, **kwargs: t.Any) -> t.Any: """ Invokes the method at path of object. Args: obj: The object to query. path: The path of the method to invoke. args: Arguments to pass to method call. kwargs: Keyword arguments to pass to method call. Returns: Result of the invoked method. Example: >>> obj = {"a": [{"b": {"c": [1, 2, 3, 4]}}]} >>> invoke(obj, "a[0].b.c.pop", 1) 2 >>> obj {'a': [{'b': {'c': [1, 3, 4]}}]} .. versionadded:: 1.0.0 """ paths = to_path(path) target_path = pyd.initial(paths) method_name = pyd.last(paths) try: # potential error is caught method = getattr(get(obj, target_path), method_name) # type: ignore except AttributeError: ret = None else: ret = method(*args, **kwargs) return ret @t.overload def keys(obj: t.Iterable[T]) -> t.List[T]: ... @t.overload def keys(obj: t.Any) -> t.List[t.Any]: ... def keys(obj): """ Creates a list composed of the keys of `obj`. Args: obj: Object to extract keys from. Returns: List of keys. Example: >>> keys([1, 2, 3]) [0, 1, 2] >>> set(keys({"a": 1, "b": 2, "c": 3})) == set(["a", "b", "c"]) True .. versionadded:: 1.0.0 .. versionchanged:: 1.1.0 Added ``keys_in`` as alias. .. versionchanged:: 4.0.0 Removed alias ``keys_in``. """ return [key for key, _ in iterator(obj)] @t.overload def map_keys( obj: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], T3] ) -> t.Dict[T3, T2]: ... @t.overload def map_keys(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], T3]) -> t.Dict[T3, T2]: ... @t.overload def map_keys(obj: t.Mapping[t.Any, T2], iteratee: t.Callable[[T2], T3]) -> t.Dict[T3, T2]: ... @t.overload def map_keys( obj: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> t.Dict[T2, T]: ... @t.overload def map_keys(obj: t.Iterable[T], iteratee: t.Callable[[T, int], T2]) -> t.Dict[T2, T]: ... @t.overload def map_keys(obj: t.Iterable[T], iteratee: t.Callable[[T], T2]) -> t.Dict[T2, T]: ... @t.overload def map_keys( obj: t.Iterable[t.Any], iteratee: t.Union[IterateeObjT, None] = None ) -> t.Dict[t.Any, t.Any]: ... def map_keys(obj, iteratee=None): """ The opposite of :func:`map_values`, this method creates an object with the same values as object and keys generated by running each own enumerable string keyed property of object through iteratee. The iteratee is invoked with three arguments: ``(value, key, object)``. Args: obj: Object to map. iteratee: Iteratee applied per iteration. Returns: Results of running `obj` through `iteratee`. Example: >>> callback = lambda value, key: key * 2 >>> results = map_keys({"a": 1, "b": 2, "c": 3}, callback) >>> results == {"aa": 1, "bb": 2, "cc": 3} True .. versionadded:: 3.3.0 """ return {result: value for result, value, _, _ in iteriteratee(obj, iteratee)} @t.overload def map_values( obj: t.Mapping[T, T2], iteratee: t.Callable[[T2, T, t.Dict[T, T2]], T3] ) -> t.Dict[T, T3]: ... @t.overload def map_values(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], T3]) -> t.Dict[T, T3]: ... 
@t.overload def map_values(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2], T3]) -> t.Dict[T, T3]: ... @t.overload def map_values( obj: t.Iterable[T], iteratee: t.Callable[[T, int, t.List[T]], T2] ) -> t.Dict[T, T2]: ... @t.overload def map_values(obj: t.Iterable[T], iteratee: t.Callable[[T, int], T2]) -> t.Dict[T, T2]: ... @t.overload def map_values(obj: t.Iterable[T], iteratee: t.Callable[[T], T2]) -> t.Dict[T, T2]: ... @t.overload def map_values( obj: t.Iterable[t.Any], iteratee: t.Union[IterateeObjT, None] = None ) -> t.Dict[t.Any, t.Any]: ... def map_values(obj, iteratee=None): """ Creates an object with the same keys as object and values generated by running each string keyed property of object through iteratee. The iteratee is invoked with three arguments: ``(value, key, object)``. Args: obj: Object to map. iteratee: Iteratee applied per iteration. Returns: Results of running `obj` through `iteratee`. Example: >>> results = map_values({"a": 1, "b": 2, "c": 3}, lambda x: x * 2) >>> results == {"a": 2, "b": 4, "c": 6} True >>> results = map_values({"a": 1, "b": {"d": 4}, "c": 3}, {"d": 4}) >>> results == {"a": False, "b": True, "c": False} True .. versionadded:: 1.0.0 """ return {key: result for result, _, key, _ in iteriteratee(obj, iteratee)} def map_values_deep( obj: t.Iterable[t.Any], iteratee: t.Union[t.Callable[..., t.Any], None] = None, property_path: t.Any = UNSET, ) -> t.Any: """ Map all non-object values in `obj` with return values from `iteratee`. The iteratee is invoked with two arguments: ``(obj_value, property_path)`` where ``property_path`` contains the list of path keys corresponding to the path of ``obj_value``. Args: obj: Object to map. iteratee: Iteratee applied to each value. property_path: Path key(s) to access. Returns: The modified object. Warning: `obj` is modified in place. Example: >>> x = {"a": 1, "b": {"c": 2}} >>> y = map_values_deep(x, lambda val: val * 2) >>> y == {"a": 2, "b": {"c": 4}} True >>> z = map_values_deep(x, lambda val, props: props) >>> z == {"a": ["a"], "b": {"c": ["b", "c"]}} True .. versionadded: 2.2.0 .. versionchanged:: 3.0.0 Allow iteratees to accept partial arguments. .. versionchanged:: 4.0.0 Renamed from ``deep_map_values`` to ``map_values_deep``. """ properties = to_path(property_path) if pyd.is_object(obj): def deep_iteratee(value, key): return map_values_deep(value, iteratee, pyd.flatten([properties, key])) return assign(obj, map_values(obj, deep_iteratee)) # type: ignore else: return callit(iteratee, obj, properties) def apply(obj: T, func: t.Callable[[T], T2]) -> T2: """ Returns the result of calling `func` on `obj`. Particularly useful to pass `obj` through a function during a method chain. Args: obj: Object to apply function to func: Function called with `obj`. Returns: Results of ``func(value)``. Example: >>> apply(5, lambda x: x * 2) 10 .. versionadded:: 8.0.0 """ return func(obj) def apply_if(obj: T, func: t.Callable[[T], T2], predicate: t.Callable[[T], bool]) -> t.Union[T, T2]: """ Apply `func` to `obj` if `predicate` returns `True`. Args: obj: Object to apply `func` to. func: Function to apply to `obj`. predicate: Predicate applied to `obj`. Returns: Result of applying `func` to `obj` or `obj`. Example: >>> apply_if(2, lambda x: x * 2, lambda x: x > 1) 4 >>> apply_if(2, lambda x: x * 2, lambda x: x < 1) 2 .. versionadded:: 8.0.0 """ return func(obj) if predicate(obj) else obj def apply_if_not_none(obj: t.Optional[T], func: t.Callable[[T], T2]) -> t.Optional[T2]: """ Apply `func` to `obj` if `obj` is not ``None``. 
Args: obj: Object to apply `func` to. func: Function to apply to `obj`. Returns: Result of applying `func` to `obj` or ``None``. Example: >>> apply_if_not_none(2, lambda x: x * 2) 4 >>> apply_if_not_none(None, lambda x: x * 2) is None True .. versionadded:: 8.0.0 """ return apply_if(obj, func, lambda x: x is not None) # type: ignore @t.overload def apply_catch( obj: T, func: t.Callable[[T], T2], exceptions: t.Iterable[t.Type[Exception]], default: T3 ) -> t.Union[T2, T3]: ... @t.overload def apply_catch( obj: T, func: t.Callable[[T], T2], exceptions: t.Iterable[t.Type[Exception]], default: Unset = UNSET, ) -> t.Union[T, T2]: ... def apply_catch(obj, func, exceptions, default=UNSET): """ Tries to apply `func` to `obj` if any of the exceptions in `excs` are raised, return `default` or `obj` if not set. Args: obj: Object to apply `func` to. func: Function to apply to `obj`. excs: Exceptions to catch. default: Value to return if exception is raised. Returns: Result of applying `func` to `obj` or ``default``. Example: >>> apply_catch(2, lambda x: x * 2, [ValueError]) 4 >>> apply_catch(2, lambda x: x / 0, [ZeroDivisionError], "error") 'error' >>> apply_catch(2, lambda x: x / 0, [ZeroDivisionError]) 2 .. versionadded:: 8.0.0 """ try: return func(obj) except tuple(exceptions): return obj if default is UNSET else default @t.overload def merge( obj: t.Mapping[T, T2], *sources: t.Mapping[T3, T4] ) -> t.Dict[t.Union[T, T3], t.Union[T2, T4]]: ... @t.overload def merge(obj: t.Sequence[T], *sources: t.Sequence[T2]) -> t.List[t.Union[T, T2]]: ... def merge(obj, *sources): """ Recursively merges properties of the source object(s) into the destination object. Subsequent sources will overwrite property assignments of previous sources. Args: obj: Destination object to merge source(s) into. sources: Source objects to merge from. subsequent sources overwrite previous ones. Returns: Merged object. Warning: `obj` is modified in place. Example: >>> obj = {"a": 2} >>> obj2 = merge(obj, {"a": 1}, {"b": 2, "c": 3}, {"d": 4}) >>> obj2 == {"a": 1, "b": 2, "c": 3, "d": 4} True >>> obj is obj2 True .. versionadded:: 1.0.0 .. versionchanged:: 2.3.2 Apply :func:`clone_deep` to each `source` before assigning to `obj`. .. versionchanged:: 2.3.2 Allow `iteratee` to be passed by reference if it is the last positional argument. .. versionchanged:: 4.0.0 Moved iteratee argument to :func:`merge_with`. .. versionchanged:: 4.9.3 Fixed regression in v4.8.0 that caused exception when `obj` was ``None``. """ return merge_with(obj, *sources) def merge_with(obj: t.Any, *sources: t.Any, **kwargs: t.Any) -> t.Any: """ This method is like :func:`merge` except that it accepts customizer which is invoked to produce the merged values of the destination and source properties. If customizer returns ``None``, merging is handled by this method instead. The customizer is invoked with five arguments: ``(obj_value, src_value, key, obj, source)``. Args: obj: Destination object to merge source(s) into. sources: Source objects to merge from. subsequent sources overwrite previous ones. Keyword Args: iteratee: Iteratee function to handle merging (must be passed in as keyword argument). Returns: Merged object. Warning: `obj` is modified in place. Example: >>> cbk = lambda obj_val, src_val: obj_val + src_val >>> obj1 = {"a": [1], "b": [2]} >>> obj2 = {"a": [3], "b": [4]} >>> res = merge_with(obj1, obj2, cbk) >>> obj1 == {"a": [1, 3], "b": [2, 4]} True .. versionadded:: 4.0.0 .. 
versionchanged:: 4.9.3 Fixed regression in v4.8.0 that caused exception when `obj` was ``None``. """ if obj is None: return None list_sources = list(sources) iteratee = kwargs.pop("iteratee", None) if iteratee is None and list_sources and callable(list_sources[-1]): iteratee = list_sources.pop() list_sources = [copy.deepcopy(source) for source in list_sources] if callable(iteratee): iteratee = partial(callit, iteratee, argcount=getargcount(iteratee, maxargs=5)) else: iteratee = None return _merge_with(obj, *list_sources, iteratee=iteratee, **kwargs) def _merge_with(obj, *sources, **kwargs): iteratee = kwargs.get("iteratee") setter = kwargs.get("_setter") if setter is None: setter = base_set for source in sources: for key, src_value in iterator(source): obj_value = base_get(obj, key, default=None) all_sequences = isinstance(src_value, list) and isinstance(obj_value, list) all_mappings = isinstance(src_value, dict) and isinstance(obj_value, dict) _result = None if iteratee: _result = iteratee(obj_value, src_value, key, obj, source) if _result is not None: result = _result elif all_sequences or all_mappings: result = _merge_with(obj_value, src_value, iteratee=iteratee, _setter=setter) else: result = src_value setter(obj, key, result) return obj @t.overload def omit(obj: t.Mapping[T, T2], *properties: PathT) -> t.Dict[T, T2]: ... @t.overload def omit(obj: t.Union[t.Iterator[T], t.Sequence[T]], *properties: PathT) -> t.Dict[int, T]: ... @t.overload def omit(obj: t.Any, *properties: PathT) -> t.Dict[t.Any, t.Any]: ... def omit(obj, *properties): """ The opposite of :func:`pick`. This method creates an object composed of the property paths of `obj` that are not omitted. Args: obj: Object to process. *properties: Property values to omit. Returns: Results of omitting properties. Example: >>> omit({"a": 1, "b": 2, "c": 3}, "b", "c") == {"a": 1} True >>> omit({"a": 1, "b": 2, "c": 3}, ["a", "c"]) == {"b": 2} True >>> omit([1, 2, 3, 4], 0, 3) == {1: 2, 2: 3} True >>> omit({"a": {"b": {"c": "d"}}}, "a.b.c") == {"a": {"b": {}}} True .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Moved iteratee argument to :func:`omit_by`. .. versionchanged:: 4.2.0 Support deep paths. """ return omit_by(obj, pyd.flatten(properties)) @t.overload def omit_by(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], t.Any]) -> t.Dict[T, T2]: ... @t.overload def omit_by(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2], t.Any]) -> t.Dict[T, T2]: ... @t.overload def omit_by(obj: t.Dict[T, T2], iteratee: None = None) -> t.Dict[T, T2]: ... @t.overload def omit_by( obj: t.Union[t.Iterator[T], t.Sequence[T]], iteratee: t.Callable[[T, int], t.Any] ) -> t.Dict[int, T]: ... @t.overload def omit_by( obj: t.Union[t.Iterator[T], t.Sequence[T]], iteratee: t.Callable[[T], t.Any] ) -> t.Dict[int, T]: ... @t.overload def omit_by(obj: t.List[T], iteratee: None = None) -> t.Dict[int, T]: ... @t.overload def omit_by( obj: t.Any, iteratee: t.Union[t.Callable[..., t.Any], None] = None ) -> t.Dict[t.Any, t.Any]: ... def omit_by(obj, iteratee=None): """ The opposite of :func:`pick_by`. This method creates an object composed of the string keyed properties of object that predicate doesn't return truthy for. The predicate is invoked with two arguments: ``(value, key)``. Args: obj: Object to process. iteratee: Iteratee used to determine which properties to omit. Returns: Results of omitting properties. Example: >>> omit_by({"a": 1, "b": "2", "c": 3}, lambda v: isinstance(v, int)) {'b': '2'} .. versionadded:: 4.0.0 .. 
versionchanged:: 4.2.0 Support deep paths for `iteratee`. """ if not callable(iteratee): paths = pyd.map_(iteratee, to_path) if any(len(path) > 1 for path in paths): cloned = clone_deep(obj) else: cloned = to_dict(obj) def _unset(obj, path): pyd.unset(obj, path) return obj ret = pyd.reduce_(paths, _unset, cloned) else: argcount = getargcount(iteratee, maxargs=2) ret = { key: value for key, value in iterator(obj) if not callit(iteratee, value, key, argcount=argcount) } return ret def parse_int(value: t.Any, radix: t.Union[int, None] = None) -> t.Union[int, None]: """ Converts the given `value` into an integer of the specified `radix`. If `radix` is falsey, a radix of ``10`` is used unless the `value` is a hexadecimal, in which case a radix of 16 is used. Args: value: Value to parse. radix: Base to convert to. Returns: Integer if parsable else ``None``. Example: >>> parse_int("5") 5 >>> parse_int("12", 8) 10 >>> parse_int("x") is None True .. versionadded:: 1.0.0 """ if not radix and pyd.is_string(value): try: # Check if value is hexadecimal and if so use base-16 conversion. int(value, 16) except ValueError: pass else: radix = 16 if not radix: radix = 10 try: # NOTE: Must convert value to string when supplying radix to int(). Dropping radix arg when # 10 is needed to allow floats to parse correctly. args = (value,) if radix == 10 else (to_string(value), radix) parsed = int(*args) except (ValueError, TypeError): parsed = None return parsed @t.overload def pick(obj: t.Mapping[T, T2], *properties: PathT) -> t.Dict[T, T2]: ... @t.overload def pick(obj: t.Union[t.Tuple[T, ...], t.List[T]], *properties: PathT) -> t.Dict[int, T]: ... @t.overload def pick(obj: t.Any, *properties: PathT) -> t.Dict[t.Any, t.Any]: ... def pick(obj, *properties): """ Creates an object composed of the picked object properties. Args: obj: Object to pick from. properties: Property values to pick. Returns: Dict containing picked properties. Example: >>> pick({"a": 1, "b": 2, "c": 3}, "a", "b") == {"a": 1, "b": 2} True .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Moved iteratee argument to :func:`pick_by`. """ return pick_by(obj, pyd.flatten(properties)) @t.overload def pick_by(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2], t.Any]) -> t.Dict[T, T2]: ... @t.overload def pick_by(obj: t.Mapping[T, T2], iteratee: t.Callable[[T2, T], t.Any]) -> t.Dict[T, T2]: ... @t.overload def pick_by(obj: t.Dict[T, T2], iteratee: None = None) -> t.Dict[T, T2]: ... @t.overload def pick_by( obj: t.Union[t.Tuple[T, ...], t.List[T]], iteratee: t.Callable[[T, int], t.Any] ) -> t.Dict[int, T]: ... @t.overload def pick_by( obj: t.Union[t.Tuple[T, ...], t.List[T]], iteratee: t.Callable[[T], t.Any] ) -> t.Dict[int, T]: ... @t.overload def pick_by(obj: t.Union[t.Tuple[T, ...], t.List[T]], iteratee: None = None) -> t.Dict[int, T]: ... @t.overload def pick_by( obj: t.Any, iteratee: t.Union[t.Callable[..., t.Any], None] = None ) -> t.Dict[t.Any, t.Any]: ... def pick_by(obj, iteratee=None): """ Creates an object composed of the object properties predicate returns truthy for. The predicate is invoked with two arguments: ``(value, key)``. Args: obj: Object to pick from. iteratee: Iteratee used to determine which properties to pick. Returns: Dict containing picked properties. Example: >>> obj = {"a": 1, "b": "2", "c": 3} >>> pick_by(obj, lambda v: isinstance(v, int)) == {"a": 1, "c": 3} True .. 
versionadded:: 4.0.0 """ obj = to_dict(obj) if iteratee is None or callable(iteratee): paths = keys(obj) if iteratee is None: iteratee = pyd.identity argcount = 1 else: argcount = getargcount(iteratee, maxargs=2) else: paths = iteratee if iteratee is not None else [] def iteratee(value, path): # pylint: disable=function-redefined return has(obj, path) argcount = 2 result = {} for path in paths: value = get(obj, path) if callit(iteratee, value, path, argcount=argcount): set_(result, path, value) return result def rename_keys(obj: t.Dict[T, T2], key_map: t.Dict[t.Any, T3]) -> t.Dict[t.Union[T, T3], T2]: """ Rename the keys of `obj` using `key_map` and return new object. Args: obj: Object to rename. key_map: Renaming map whose keys correspond to existing keys in `obj` and whose values are the new key name. Returns: Renamed `obj`. Example: >>> obj = rename_keys({"a": 1, "b": 2, "c": 3}, {"a": "A", "b": "B"}) >>> obj == {"A": 1, "B": 2, "c": 3} True .. versionadded:: 2.0.0 """ return {key_map.get(key, key): value for key, value in obj.items()} def set_(obj: T, path: PathT, value: t.Any) -> T: """ Sets the value of an object described by `path`. If any part of the object path doesn't exist, it will be created. Args: obj: Object to modify. path: Target path to set value to. value: Value to set. Returns: Modified `obj`. Warning: `obj` is modified in place. Example: >>> set_({}, "a.b.c", 1) {'a': {'b': {'c': 1}}} >>> set_({}, "a.0.c", 1) {'a': {'0': {'c': 1}}} >>> set_([1, 2], "[2][0]", 1) [1, 2, [1]] >>> set_({}, "a.b[0].c", 1) {'a': {'b': [{'c': 1}]}} .. versionadded:: 2.2.0 .. versionchanged:: 3.3.0 Added :func:`set_` as main definition and :func:`deep_set` as alias. .. versionchanged:: 4.0.0 - Modify `obj` in place. - Support creating default path values as ``list`` or ``dict`` based on whether key or index substrings are used. - Remove alias ``deep_set``. """ return set_with(obj, path, value) def set_with( obj: T, path: PathT, value: t.Any, customizer: t.Union[t.Callable[..., t.Any], None] = None ) -> T: """ This method is like :func:`set_` except that it accepts customizer which is invoked to produce the objects of path. If customizer returns undefined path creation is handled by the method instead. The customizer is invoked with three arguments: ``(nested_value, key, nested_object)``. Args: obj: Object to modify. path: Target path to set value to. value: Value to set. customizer: The function to customize assigned values. Returns: Modified `obj`. Warning: `obj` is modified in place. Example: >>> set_with({}, "[0][1]", "a", lambda: {}) {0: {1: 'a'}} .. versionadded:: 4.0.0 .. versionchanged:: 4.3.1 Fixed bug where a callable `value` was called when being set. """ return update_with(obj, path, pyd.constant(value), customizer=customizer) def to_boolean( obj: t.Any, true_values: t.Tuple[str, ...] = ("true", "1"), false_values: t.Tuple[str, ...] = ("false", "0"), ) -> t.Union[bool, None]: """ Convert `obj` to boolean. This is not like the builtin ``bool`` function. By default, commonly considered strings values are converted to their boolean equivalent, i.e., ``'0'`` and ``'false'`` are converted to ``False`` while ``'1'`` and ``'true'`` are converted to ``True``. If a string value is provided that isn't recognized as having a common boolean conversion, then the returned value is ``None``. Non-string values of `obj` are converted using ``bool``. Optionally, `true_values` and `false_values` can be overridden but each value must be a string. Args: obj: Object to convert. 
true_values: Values to consider ``True``. Each value must be a string. Comparision is case-insensitive. Defaults to ``('true', '1')``. false_values: Values to consider ``False``. Each value must be a string. Comparision is case-insensitive. Defaults to ``('false', '0')``. Returns: Boolean value of `obj`. Example: >>> to_boolean("true") True >>> to_boolean("1") True >>> to_boolean("false") False >>> to_boolean("0") False >>> assert to_boolean("a") is None .. versionadded:: 3.0.0 """ if pyd.is_string(obj): obj = obj.strip() def boolean_match(text, vals): if text.lower() in [val.lower() for val in vals]: return True else: return re.match("|".join(vals), text) if true_values and boolean_match(obj, true_values): value = True elif false_values and boolean_match(obj, false_values): value = False else: value = None else: value = bool(obj) return value @t.overload def to_dict(obj: t.Mapping[T, T2]) -> t.Dict[T, T2]: ... @t.overload def to_dict(obj: t.Union[t.Iterator[T], t.Sequence[T]]) -> t.Dict[int, T]: ... @t.overload def to_dict(obj: t.Any) -> t.Dict[t.Any, t.Any]: ... def to_dict(obj): """ Convert `obj` to ``dict`` by creating a new ``dict`` using `obj` keys and values. Args: obj: Object to convert. Returns: Object converted to ``dict``. Example: >>> obj = {"a": 1, "b": 2} >>> obj2 = to_dict(obj) >>> obj2 == obj True >>> obj2 is not obj True .. versionadded:: 3.0.0 .. versionchanged:: 4.0.0 Removed alias ``to_plain_object``. .. versionchanged:: 4.2.0 Use ``pydash.helpers.iterator`` to generate key/value pairs. .. versionchanged:: 4.7.1 Try to convert to ``dict`` using ``dict()`` first, then fallback to using ``pydash.helpers.iterator``. """ return dict(iterator(obj)) def to_integer(obj: t.Any) -> int: """ Converts `obj` to an integer. Args: obj: Object to convert. Returns: Converted integer or ``0`` if it can't be converted. Example: >>> to_integer(3.2) 3 >>> to_integer("3.2") 3 >>> to_integer("3.9") 3 >>> to_integer("invalid") 0 .. versionadded:: 4.0.0 """ try: # Convert to float first to handle converting floats as string since int('1.1') would fail # but this won't. num = int(float(obj)) except (ValueError, TypeError): num = 0 return num @t.overload def to_list(obj: t.Dict[t.Any, T], split_strings: bool = True) -> t.List[T]: ... @t.overload def to_list(obj: t.Iterable[T], split_strings: bool = True) -> t.List[T]: ... @t.overload def to_list(obj: T, split_strings: bool = True) -> t.List[T]: ... def to_list(obj, split_strings=True): """ Converts an obj, an iterable or a single item to a list. Args: obj: Object to convert item or wrap. split_strings: Whether to split strings into single chars. Defaults to ``True``. Returns: Converted obj or wrapped item. Example: >>> results = to_list({"a": 1, "b": 2, "c": 3}) >>> assert set(results) == set([1, 2, 3]) >>> to_list((1, 2, 3, 4)) [1, 2, 3, 4] >>> to_list(1) [1] >>> to_list([1]) [1] >>> to_list(a for a in [1, 2, 3]) [1, 2, 3] >>> to_list("cat") ['c', 'a', 't'] >>> to_list("cat", split_strings=False) ['cat'] .. versionadded:: 1.0.0 .. versionchanged:: 4.3.0 - Wrap non-iterable items in a list. - Convert other iterables to list. - Byte objects are returned as single character strings in Python 3. 
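For instance, bytes are split into one-character strings (illustrative example based on the
bytes branch of the implementation below, not an upstream doctest):

>>> to_list(b"ab")
['a', 'b']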
""" if isinstance(obj, list): return obj[:] elif isinstance(obj, dict): return obj.values() elif not split_strings and isinstance(obj, (str, bytes)): return [obj] elif split_strings and isinstance(obj, bytes): # in python3 iterating over bytes gives integers instead of strings return list(chr(c) if isinstance(c, int) else c for c in obj) else: try: return list(obj) except TypeError: return [obj] def to_number(obj: t.Any, precision: int = 0) -> t.Union[float, None]: """ Convert `obj` to a number. All numbers are retuned as ``float``. If precision is negative, round `obj` to the nearest positive integer place. If `obj` can't be converted to a number, ``None`` is returned. Args: obj: Object to convert. precision: Precision to round number to. Defaults to ``0``. Returns: Converted number or ``None`` if it can't be converted. Example: >>> to_number("1234.5678") 1235.0 >>> to_number("1234.5678", 4) 1234.5678 >>> to_number(1, 2) 1.0 .. versionadded:: 3.0.0 """ try: factor = pow(10, precision) if precision < 0: # Round down since negative `precision` means we are going to the nearest positive # integer place. rounder: t.Callable[..., t.Any] = math.floor else: rounder = round num = rounder(float(obj) * factor) / factor except Exception: num = None return num @t.overload def to_pairs(obj: t.Mapping[T, T2]) -> t.List[t.Tuple[T, T2]]: ... @t.overload def to_pairs(obj: t.Union[t.Iterator[T], t.Sequence[T]]) -> t.List[t.Tuple[int, T]]: ... @t.overload def to_pairs(obj: t.Any) -> t.List[t.Any]: ... def to_pairs(obj): """ Creates a list of tuples of an object's key-value pairs, i.e., ``[(key1, value1), (key2, value2)]``. Args: obj: Object to process. Returns: List of tuples of the object's key-value pairs. Example: >>> to_pairs([1, 2, 3, 4]) [(0, 1), (1, 2), (2, 3), (3, 4)] >>> to_pairs({"a": 1}) [('a', 1)] .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Renamed from ``pairs`` to ``to_pairs``. .. versionchanged:: 8.0.0 Returning list of tuples instead of list of lists. """ return [(key, value) for key, value in iterator(obj)] def to_string(obj: t.Any) -> str: """ Converts an object to string. Args: obj: Object to convert. Returns: String representation of `obj`. Example: >>> to_string(1) == "1" True >>> to_string(None) == "" True >>> to_string([1, 2, 3]) == "[1, 2, 3]" True >>> to_string("a") == "a" True .. versionadded:: 2.0.0 .. versionchanged:: 3.0.0 Convert ``None`` to empty string. """ if pyd.is_string(obj): res = obj elif obj is None: res = "" else: res = str(obj) return res @t.overload def transform( obj: t.Mapping[T, T2], iteratee: t.Callable[[T3, T2, T, t.Dict[T, T2]], t.Any], accumulator: T3 ) -> T3: ... @t.overload def transform( obj: t.Mapping[T, T2], iteratee: t.Callable[[T3, T2, T], t.Any], accumulator: T3 ) -> T3: ... @t.overload def transform( obj: t.Mapping[t.Any, T2], iteratee: t.Callable[[T3, T2], t.Any], accumulator: T3 ) -> T3: ... @t.overload def transform( obj: t.Mapping[t.Any, t.Any], iteratee: t.Callable[[T3], t.Any], accumulator: T3 ) -> T3: ... @t.overload def transform( obj: t.Iterable[T], iteratee: t.Callable[[T3, T, int, t.List[T]], t.Any], accumulator: T3 ) -> T3: ... @t.overload def transform( obj: t.Iterable[T], iteratee: t.Callable[[T3, T, int], t.Any], accumulator: T3 ) -> T3: ... @t.overload def transform(obj: t.Iterable[T], iteratee: t.Callable[[T3, T], t.Any], accumulator: T3) -> T3: ... @t.overload def transform(obj: t.Iterable[t.Any], iteratee: t.Callable[[T3], t.Any], accumulator: T3) -> T3: ... 
@t.overload def transform(obj: t.Any, iteratee: t.Any = None, accumulator: t.Any = None) -> t.Any: ... def transform(obj, iteratee=None, accumulator=None): """ An alernative to :func:`pydash.collections.reduce`, this method transforms `obj` to a new accumulator object which is the result of running each of its properties through an iteratee, with each iteratee execution potentially mutating the accumulator object. The iteratee is invoked with four arguments: ``(accumulator, value, key, object)``. Iteratees may exit iteration early by explicitly returning ``False``. Args: obj: Object to process. iteratee: Iteratee applied per iteration. accumulator: Accumulated object. Defaults to ``list``. Returns: Accumulated object. Example: >>> transform([1, 2, 3, 4], lambda acc, v, k: acc.append((k, v))) [(0, 1), (1, 2), (2, 3), (3, 4)] .. versionadded:: 1.0.0 """ if iteratee is None: iteratee = pyd.identity argcount = 1 else: argcount = getargcount(iteratee, maxargs=4) if accumulator is None: accumulator = [] walk = ( None for key, item in iterator(obj) if callit(iteratee, accumulator, item, key, obj, argcount=argcount) is False ) next(walk, None) return accumulator @t.overload def update( obj: t.Dict[t.Any, T2], path: PathT, updater: t.Callable[[T2], t.Any], ) -> t.Dict[t.Any, t.Any]: ... @t.overload def update( obj: t.List[T], path: PathT, updater: t.Callable[[T], t.Any], ) -> t.List[t.Any]: ... @t.overload def update( obj: T, path: PathT, updater: t.Callable[..., t.Any], ) -> T: ... def update(obj, path, updater): """ This method is like :func:`set_` except that accepts updater to produce the value to set. Use :func:`update_with` to customize path creation. The updater is invoked with one argument: ``(value)``. Args: obj: Object to modify. path: A string or list of keys that describe the object path to modify. updater: Function that returns updated value. Returns: Updated `obj`. Warning: `obj` is modified in place. Example: >>> update({}, ["a", "b"], lambda value: value) {'a': {'b': None}} >>> update([], [0, 0], lambda value: 1) [[1]] .. versionadded:: 4.0.0 """ return update_with(obj, path, updater) @t.overload def update_with( obj: t.Dict[t.Any, T2], path: PathT, updater: t.Callable[[T2], t.Any], customizer: t.Union[t.Callable[..., t.Any], None], ) -> t.Dict[t.Any, t.Any]: ... @t.overload def update_with( obj: t.List[T], path: PathT, updater: t.Callable[[T], t.Any], customizer: t.Union[t.Callable[..., t.Any], None] = None, ) -> t.List[t.Any]: ... @t.overload def update_with( obj: T, path: PathT, updater: t.Callable[..., t.Any], customizer: t.Union[t.Callable[..., t.Any], None] = None, ) -> T: ... def update_with(obj, path, updater, customizer=None): # noqa: PLR0912 """ This method is like :func:`update` except that it accepts customizer which is invoked to produce the objects of path. If customizer returns ``None``, path creation is handled by the method instead. The customizer is invoked with three arguments: ``(nested_value, key, nested_object)``. Args: obj: Object to modify. path: A string or list of keys that describe the object path to modify. updater: Function that returns updated value. customizer: The function to customize assigned values. Returns: Updated `obj`. Warning: `obj` is modified in place. Example: >>> update_with({}, "[0][1]", lambda: "a", lambda: {}) {0: {1: 'a'}} .. 
versionadded:: 4.0.0 """ if not callable(updater): updater = pyd.constant(updater) if customizer is not None and not callable(customizer): call_customizer = partial(callit, clone, customizer, argcount=1) elif customizer: call_customizer = partial(callit, customizer, argcount=getargcount(customizer, maxargs=3)) else: call_customizer = None default_type = dict if isinstance(obj, dict) else list tokens = to_path_tokens(path) last_key = pyd.last(tokens) if isinstance(last_key, PathToken): last_key = last_key.key target = obj for idx, token in enumerate(pyd.initial(tokens)): key = token.key default_factory = pyd.get(tokens, [idx + 1, "default_factory"], default=default_type) obj_val = base_get(target, key, default=None) path_obj = None if call_customizer: path_obj = call_customizer(obj_val, key, target) if path_obj is None: path_obj = default_factory() base_set(target, key, path_obj, allow_override=False) try: target = base_get(target, key, default=None) except TypeError as exc: # pragma: no cover try: target = target[int(key)] _failed = False except Exception: _failed = True if _failed: raise TypeError(f"Unable to update object at index {key!r}. {exc}") from exc value = base_get(target, last_key, default=None) base_set(target, last_key, callit(updater, value)) return obj def unset(obj: t.Union[t.List[t.Any], t.Dict[t.Any, t.Any]], path: PathT) -> bool: # noqa: C901 """ Removes the property at `path` of `obj`. Note: Only ``list``, ``dict``, or objects with a ``pop()`` method can be unset by this function. Args: obj: The object to modify. path: The path of the property to unset. Returns: Whether the property was deleted. Warning: `obj` is modified in place. Example: >>> obj = {"a": [{"b": {"c": 7}}]} >>> unset(obj, "a[0].b.c") True >>> obj {'a': [{'b': {}}]} >>> unset(obj, "a[0].b.c") False """ tokens = to_path_tokens(path) last_key = pyd.last(tokens) if isinstance(last_key, PathToken): last_key = last_key.key target = obj for token in pyd.initial(tokens): key = token.key try: try: target = target[key] except TypeError: target = target[int(key)] except Exception: # Allow different types reassignment target = UNSET # type: ignore if target is UNSET: break did_unset = False if target is not UNSET: try: try: # last_key can be a lot of things # safe as everything wrapped in try/except target.pop(last_key) # type: ignore did_unset = True except TypeError: target.pop(int(last_key)) # type: ignore did_unset = True except Exception: pass return did_unset @t.overload def values(obj: t.Mapping[t.Any, T2]) -> t.List[T2]: ... @t.overload def values(obj: t.Iterable[T]) -> t.List[T]: ... @t.overload def values(obj: t.Any) -> t.List[t.Any]: ... def values(obj): """ Creates a list composed of the values of `obj`. Args: obj: Object to extract values from. Returns: List of values. Example: >>> results = values({"a": 1, "b": 2, "c": 3}) >>> set(results) == set([1, 2, 3]) True >>> values([2, 4, 6, 8]) [2, 4, 6, 8] .. versionadded:: 1.0.0 .. versionchanged:: 1.1.0 Added ``values_in`` as alias. .. versionchanged:: 4.0.0 Removed alias ``values_in``. 
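Additional illustrative example (assuming tuples are iterated positionally, like the list
case above):

>>> values((1, 2, 3))
[1, 2, 3]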
""" return [value for _, value in iterator(obj)] # # Utility methods not a part of the main API # def base_clone(value, is_deep=False, customizer=None, key=None, _cloned=False): """Base clone function that supports deep clone and customizer callback.""" clone_by = copy.deepcopy if is_deep else copy.copy result = None if callable(customizer) and not _cloned: argcount = getargcount(customizer, maxargs=4) cbk = partial(callit, customizer, argcount=argcount) elif _cloned: cbk = customizer else: cbk = None if cbk: result = cbk(value, key, value) if result is not None: return result if not _cloned: result = clone_by(value) else: result = value if cbk and not pyd.is_string(value) and not isinstance(value, bytes): for key, subvalue in iterator(value): # noqa: PLR1704 if is_deep: val = base_clone(subvalue, is_deep, cbk, key, _cloned=True) else: val = cbk(subvalue, key, value) if val is not None: result[key] = val return result pydash-8.0.3/src/pydash/predicates.py000066400000000000000000001017461464745015500176120ustar00rootroot00000000000000""" Predicate functions that return boolean evaluations of objects. .. versionadded:: 2.0.0 """ from __future__ import annotations from collections.abc import Iterable, Mapping import datetime from itertools import islice import json import operator import re from types import BuiltinFunctionType import typing as t from typing_extensions import TypeGuard import pydash as pyd from .helpers import BUILTINS, NUMBER_TYPES, UNSET, base_get, callit, iterator if t.TYPE_CHECKING: from _typeshed import ( # pragma: no cover SupportsDunderGE, SupportsDunderGT, SupportsDunderLE, SupportsDunderLT, SupportsRichComparison, ) __all__ = ( "eq", "eq_cmp", "gt", "gt_cmp", "gte", "gte_cmp", "lt", "lt_cmp", "lte", "lte_cmp", "in_range", "in_range_cmp", "is_associative", "is_blank", "is_boolean", "is_builtin", "is_date", "is_decreasing", "is_dict", "is_empty", "is_equal", "is_equal_cmp", "is_equal_with", "is_equal_with_cmp", "is_error", "is_even", "is_float", "is_function", "is_increasing", "is_indexed", "is_instance_of", "is_instance_of_cmp", "is_integer", "is_iterable", "is_json", "is_list", "is_match", "is_match_cmp", "is_match_with", "is_match_with_cmp", "is_monotone", "is_monotone_cmp", "is_nan", "is_negative", "is_none", "is_number", "is_object", "is_odd", "is_positive", "is_reg_exp", "is_set", "is_strictly_decreasing", "is_strictly_increasing", "is_string", "is_tuple", "is_zero", ) T = t.TypeVar("T") T2 = t.TypeVar("T2") T3 = t.TypeVar("T3") RegExp = type(re.compile("")) def eq(value: t.Any, other: t.Any) -> bool: """ Checks if :attr:`value` is equal to :attr:`other`. Args: value: Value to compare. other: Other value to compare. Returns: Whether :attr:`value` is equal to :attr:`other`. Example: >>> eq(None, None) True >>> eq(None, "") False >>> eq("a", "a") True >>> eq(1, str(1)) False .. versionadded:: 4.0.0 """ return value is other def eq_cmp(other: T) -> t.Callable[[T], bool]: """ Curried version of :func:`eq`. Args: other: Value to compare. Returns: A predicate checking whether passed :attr:`value` is equal to :attr:`other`. Example: >>> eq_cmp(None)(None) True >>> eq_cmp(None)("") False >>> eq_cmp("a")("a") True >>> eq_cmp(1)(str(1)) False .. versionadded:: 7.1.0 """ return lambda value: eq(value, other) def gt(value: "SupportsDunderGT[T]", other: T) -> bool: """ Checks if `value` is greater than `other`. Args: value: Value to compare. other: Other value to compare. Returns: Whether `value` is greater than `other`. 
Example: >>> gt(5, 3) True >>> gt(3, 5) False >>> gt(5, 5) False .. versionadded:: 3.3.0 """ return value > other def gt_cmp(other: T) -> t.Callable[["SupportsDunderGT[T]"], bool]: """ Curried version of :func:`gt`. Args: other: Value to compare. Returns: A predicate checking whether passed :attr:`value` is greater than :attr:`other`. Example: >>> gt_cmp(3)(5) True >>> gt_cmp(5)(3) False >>> gt_cmp(5)(5) False .. versionadded:: 7.1.0 """ return lambda value: gt(value, other) def gte(value: "SupportsDunderGE[T]", other: T) -> bool: """ Checks if `value` is greater than or equal to `other`. Args: value: Value to compare. other: Other value to compare. Returns: Whether `value` is greater than or equal to `other`. Example: >>> gte(5, 3) True >>> gte(3, 5) False >>> gte(5, 5) True .. versionadded:: 3.3.0 """ return value >= other def gte_cmp(other: T) -> t.Callable[["SupportsDunderGE[T]"], bool]: """ Curried version of :func:`gte`. Args: other: Value to compare. Returns: A predicate checking whether passed :attr:`value` is greater than or equal to :attr:`other`. Example: >>> gte_cmp(3)(5) True >>> gte_cmp(5)(3) False >>> gte_cmp(5)(5) True .. versionadded:: 7.1.0 """ return lambda value: gte(value, other) def lt(value: "SupportsDunderLT[T]", other: T) -> bool: """ Checks if `value` is less than `other`. Args: value: Value to compare. other: Other value to compare. Returns: Whether `value` is less than `other`. Example: >>> lt(5, 3) False >>> lt(3, 5) True >>> lt(5, 5) False .. versionadded:: 3.3.0 """ return value < other def lt_cmp(other: T) -> t.Callable[["SupportsDunderLT[T]"], bool]: """ Curried version of :func:`lt`. Args: other: Value to compare. Returns: A predicate checking whether passed :attr:`value` is less than :attr:`other`. Example: >>> lt_cmp(3)(5) False >>> lt_cmp(5)(3) True >>> lt_cmp(5)(5) False .. versionadded:: 7.1.0 """ return lambda value: lt(value, other) def lte(value: "SupportsDunderLE[T]", other: T) -> bool: """ Checks if `value` is less than or equal to `other`. Args: value: Value to compare. other: Other value to compare. Returns: Whether `value` is less than or equal to `other`. Example: >>> lte(5, 3) False >>> lte(3, 5) True >>> lte(5, 5) True .. versionadded:: 3.3.0 """ return value <= other def lte_cmp(other: T) -> t.Callable[["SupportsDunderLE[T]"], bool]: """ Curried version of :func:`lte`. Args: other: Value to compare. Returns: A predicate checking whether passed :attr:`value` is less than or equal to :attr:`other`. Example: >>> lte_cmp(3)(5) False >>> lte_cmp(5)(3) True >>> lte_cmp(5)(5) True .. versionadded:: 7.1.0 """ return lambda value: lte(value, other) def in_range(value: t.Any, start: t.Any = 0, end: t.Any = None) -> bool: """ Checks if `value` is between `start` and up to but not including `end`. If `end` is not specified it defaults to `start` with `start` becoming ``0``. Args: value: Number to check. start: Start of range inclusive. Defaults to ``0``. end: End of range exclusive. Defaults to `start`. Returns: Whether `value` is in range. Example: >>> in_range(2, 4) True >>> in_range(4, 2) False >>> in_range(2, 1, 3) True >>> in_range(3, 1, 2) False >>> in_range(2.5, 3.5) True >>> in_range(3.5, 2.5) False .. versionadded:: 3.1.0 """ if not is_number(value): return False if not is_number(start): start = 0 if end is None: end = start start = 0 elif not is_number(end): end = 0 return start <= value < end def in_range_cmp(start: t.Any = 0, end: t.Any = None) -> t.Callable[[t.Any], bool]: """ Curried version of :func:`in_range`. 
Args: start: Start of range inclusive. Defaults to ``0``. end: End of range exclusive. Defaults to `start`. Returns: A predicate checking whether passed :attr:`value` is in range. Example: >>> in_range_cmp(4)(2) True >>> in_range_cmp(2)(4) False >>> in_range_cmp(1, 3)(2) True >>> in_range_cmp(1, 2)(3) False >>> in_range_cmp(3.5)(2.5) True >>> in_range_cmp(2.5)(3.5) False .. versionadded:: 7.1.0 """ return lambda value: in_range(value, start, end) def is_associative(value: t.Any) -> bool: """ Checks if `value` is an associative object meaning that it can be accessed via an index or key. Args: value: Value to check. Returns: Whether `value` is associative. Example: >>> is_associative([]) True >>> is_associative({}) True >>> is_associative(1) False >>> is_associative(True) False .. versionadded:: 2.0.0 """ return hasattr(value, "__getitem__") def is_blank(text: t.Any) -> TypeGuard[str]: r""" Checks if `text` contains only whitespace characters. Args: text: String to test. Returns: Whether `text` is blank. Example: >>> is_blank("") True >>> is_blank(" \r\n ") True >>> is_blank(False) False .. versionadded:: 3.0.0 """ try: ret = bool(re.match(r"^(\s+)?$", text)) except TypeError: ret = False return ret def is_boolean(value: t.Any) -> TypeGuard[bool]: """ Checks if `value` is a boolean value. Args: value: Value to check. Returns: Whether `value` is a boolean. Example: >>> is_boolean(True) True >>> is_boolean(False) True >>> is_boolean(0) False .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0 Added ``is_bool`` as alias. .. versionchanged:: 4.0.0 Removed alias ``is_bool``. """ return isinstance(value, bool) def is_builtin(value: t.Any) -> bool: """ Checks if `value` is a Python builtin function or method. Args: value: Value to check. Returns: Whether `value` is a Python builtin function or method. Example: >>> is_builtin(1) True >>> is_builtin(list) True >>> is_builtin("foo") False .. versionadded:: 3.0.0 .. versionchanged:: 4.0.0 Removed alias ``is_native``. """ try: return isinstance(value, BuiltinFunctionType) or value in BUILTINS except TypeError: # pragma: no cover return False def is_date(value: t.Any) -> bool: """ Check if `value` is a date object. Args: value: Value to check. Returns: Whether `value` is a date object. Example: >>> import datetime >>> is_date(datetime.date.today()) True >>> is_date(datetime.datetime.today()) True >>> is_date("2014-01-01") False Note: This will also return ``True`` for datetime objects. .. versionadded:: 1.0.0 """ return isinstance(value, datetime.date) def is_decreasing( value: t.Union["SupportsRichComparison", t.List["SupportsRichComparison"]], ) -> bool: """ Check if `value` is monotonically decreasing. Args: value: Value to check. Returns: Whether `value` is monotonically decreasing. Example: >>> is_decreasing([5, 4, 4, 3]) True >>> is_decreasing([5, 5, 5]) True >>> is_decreasing([5, 4, 5]) False .. versionadded:: 2.0.0 """ return is_monotone(value, operator.ge) # type: ignore def is_dict(value: t.Any) -> bool: """ Checks if `value` is a ``dict``. Args: value: Value to check. Returns: Whether `value` is a ``dict``. Example: >>> is_dict({}) True >>> is_dict([]) False .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0 Added :func:`is_dict` as main definition and made `is_plain_object`` an alias. .. versionchanged:: 4.0.0 Removed alias ``is_plain_object``. """ return isinstance(value, dict) def is_empty(value: t.Any) -> bool: """ Checks if `value` is empty. Args: value: Value to check. Returns: Whether `value` is empty. 
Example: >>> is_empty(0) True >>> is_empty(1) True >>> is_empty(True) True >>> is_empty("foo") False >>> is_empty(None) True >>> is_empty({}) True Note: Returns ``True`` for booleans and numbers. .. versionadded:: 1.0.0 """ return is_boolean(value) or is_number(value) or not value def is_equal(value: t.Any, other: t.Any) -> bool: """ Performs a comparison between two values to determine if they are equivalent to each other. Args: value: Object to compare. other: Object to compare. Returns: Whether `value` and `other` are equal. Example: >>> is_equal([1, 2, 3], [1, 2, 3]) True >>> is_equal("a", "A") False .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Removed :attr:`iteratee` from :func:`is_equal` and added it in :func:`is_equal_with`. """ return is_equal_with(value, other, customizer=None) def is_equal_cmp(other: T) -> t.Callable[[T], bool]: """ Curried version of :func:`is_equal`. Args: other: Value to compare. Returns: A predicate checking whether passed :attr:`value` is equal to :attr:`other`. Example: >>> is_equal_cmp([1, 2, 3])([1, 2, 3]) True >>> is_equal_cmp("a")("A") False .. versionadded:: 7.1.0 """ return lambda value: is_equal(value, other) @t.overload def is_equal_with(value: T, other: T2, customizer: t.Callable[[T, T2], T3]) -> T3: ... @t.overload def is_equal_with(value: t.Any, other: t.Any, customizer: t.Callable[..., t.Any]) -> bool: ... @t.overload def is_equal_with(value: t.Any, other: t.Any, customizer: None) -> bool: ... def is_equal_with(value, other, customizer): """ This method is like :func:`is_equal` except that it accepts customizer which is invoked to compare values. A customizer is provided which will be executed to compare values. If the customizer returns ``None``, comparisons will be handled by the method instead. The customizer is invoked with two arguments: ``(value, other)``. Args: value: Object to compare. other: Object to compare. customizer: Customizer used to compare values from `value` and `other`. Returns: Whether `value` and `other` are equal. Example: >>> is_equal_with([1, 2, 3], [1, 2, 3], None) True >>> is_equal_with("a", "A", None) False >>> is_equal_with("a", "A", lambda a, b: a.lower() == b.lower()) True .. versionadded:: 4.0.0 """ # If customizer provided, use it for comparison. equal = customizer(value, other) if callable(customizer) else None # Return customizer results if anything but None. if equal is not None: pass elif ( callable(customizer) and type(value) is type(other) and isinstance(value, (list, dict)) and isinstance(other, (list, dict)) and len(value) == len(other) ): # Walk a/b to determine equality using customizer. for key, val in iterator(value): if pyd.has(other, key): equal = is_equal_with(val, other[key], customizer) else: equal = False if not equal: break else: # Use basic == comparison. equal = value == other return equal def is_equal_with_cmp(other: T, customizer: t.Callable[[T, T], T3]) -> t.Callable[[T], T3]: """ Curried version of :func:`is_equal_with`. Args: other: Value to compare. customizer: Customizer used to compare values from `value` and `other`. Returns: A predicate checking whether passed :attr:`value` and :attr:`other` are equal. Example: >>> is_equal_with_cmp([1, 2, 3], None)([1, 2, 3]) True >>> is_equal_with_cmp("a", None)("A") False >>> is_equal_with_cmp("a", lambda a, b: a.lower() == b.lower())("A") True .. versionadded:: 7.1.0 """ return lambda value: is_equal_with(value, other, customizer) def is_error(value: t.Any) -> bool: """ Checks if `value` is an ``Exception``. Args: value: Value to check. 
Returns: Whether `value` is an exception. Example: >>> is_error(Exception()) True >>> is_error(Exception) False >>> is_error(None) False .. versionadded:: 1.1.0 """ return isinstance(value, Exception) def is_even(value: t.Any) -> bool: """ Checks if `value` is even. Args: value: Value to check. Returns: Whether `value` is even. Example: >>> is_even(2) True >>> is_even(3) False >>> is_even(False) False .. versionadded:: 2.0.0 """ return is_number(value) and value % 2 == 0 def is_float(value: t.Any) -> TypeGuard[float]: """ Checks if `value` is a float. Args: value: Value to check. Returns: Whether `value` is a float. Example: >>> is_float(1.0) True >>> is_float(1) False .. versionadded:: 2.0.0 """ return isinstance(value, float) def is_function(value: t.Any) -> bool: """ Checks if `value` is a function. Args: value: Value to check. Returns: Whether `value` is callable. Example: >>> is_function(list) True >>> is_function(lambda: True) True >>> is_function(1) False .. versionadded:: 1.0.0 """ return callable(value) def is_increasing( value: t.Union["SupportsRichComparison", t.List["SupportsRichComparison"]], ) -> bool: """ Check if `value` is monotonically increasing. Args: value: Value to check. Returns: Whether `value` is monotonically increasing. Example: >>> is_increasing([1, 3, 5]) True >>> is_increasing([1, 1, 2, 3, 3]) True >>> is_increasing([5, 5, 5]) True >>> is_increasing([1, 2, 4, 3]) False .. versionadded:: 2.0.0 """ return is_monotone(value, operator.le) # type: ignore def is_indexed(value: t.Any) -> bool: """ Checks if `value` is integer indexed, i.e., ``list``, ``str`` or ``tuple``. Args: value: Value to check. Returns: Whether `value` is integer indexed. Example: >>> is_indexed("") True >>> is_indexed([]) True >>> is_indexed(()) True >>> is_indexed({}) False .. versionadded:: 2.0.0 .. versionchanged:: 3.0.0 Return ``True`` for tuples. """ return isinstance(value, (list, tuple, str)) def is_instance_of(value: t.Any, types: t.Union[type, t.Tuple[type, ...]]) -> bool: """ Checks if `value` is an instance of `types`. Args: value: Value to check. types: Types to check against. Pass as ``tuple`` to check if `value` is one of multiple types. Returns: Whether `value` is an instance of `types`. Example: >>> is_instance_of({}, dict) True >>> is_instance_of({}, list) False .. versionadded:: 2.0.0 """ return isinstance(value, types) def is_instance_of_cmp( types: t.Union[type, t.Tuple[type, ...]], ) -> t.Callable[[t.Any], bool]: """ Curried version of :func:`is_instance_of`. Args: types: Types to check against. Pass as ``tuple`` to check if `value` is one of multiple types. Returns: A predicate checking whether passed :attr:`value` is an instance of :attr:`types`. Example: >>> is_instance_of_cmp(dict)({}) True >>> is_instance_of_cmp(list)({}) False .. versionadded:: 7.1.0 """ return lambda value: is_instance_of(value, types) def is_integer(value: t.Any) -> TypeGuard[int]: """ Checks if `value` is a integer. Args: value: Value to check. Returns: Whether `value` is an integer. Example: >>> is_integer(1) True >>> is_integer(1.0) False >>> is_integer(True) False .. versionadded:: 2.0.0 .. versionchanged:: 3.0.0 Added ``is_int`` as alias. .. versionchanged:: 4.0.0 Removed alias ``is_int``. """ return is_number(value) and isinstance(value, int) def is_iterable(value: t.Any) -> bool: """ Checks if `value` is an iterable. Args: value: Value to check. Returns: Whether `value` is an iterable. 
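Note: Strings are also iterable, so ``is_iterable("abc")`` returns ``True``.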
Example: >>> is_iterable([]) True >>> is_iterable({}) True >>> is_iterable(()) True >>> is_iterable(5) False >>> is_iterable(True) False .. versionadded:: 3.3.0 """ try: iter(value) except TypeError: return False else: return True def is_json(value: t.Any) -> bool: """ Checks if `value` is a valid JSON string. Args: value: Value to check. Returns: Whether `value` is JSON. Example: >>> is_json({}) False >>> is_json("{}") True >>> is_json({"hello": 1, "world": 2}) False >>> is_json('{"hello": 1, "world": 2}') True .. versionadded:: 2.0.0 """ try: json.loads(value) return True except Exception: return False def is_list(value: t.Any) -> bool: """ Checks if `value` is a list. Args: value: Value to check. Returns: Whether `value` is a list. Example: >>> is_list([]) True >>> is_list({}) False >>> is_list(()) False .. versionadded:: 1.0.0 """ return isinstance(value, list) def is_match(obj: t.Any, source: t.Any) -> bool: """ Performs a partial deep comparison between `obj` and `source` to determine if `obj` contains equivalent property values. Args: obj: Object to compare. source: Object of property values to match. Returns: Whether `obj` is a match or not. Example: >>> is_match({'a': 1, 'b': 2}, {'b': 2}) True >>> is_match({'a': 1, 'b': 2}, {'b': 3}) False >>> is_match({'a': [{'b': [{'c': 3, 'd': 4}]}]},\ {'a': [{'b': [{'d': 4}]}]}) True .. versionadded:: 3.0.0 .. versionchanged:: 3.2.0 Don't compare `obj` and `source` using ``type``. Use ``isinstance`` exclusively. .. versionchanged:: 4.0.0 Move `iteratee` argument to :func:`is_match_with`. """ return is_match_with(obj, source) def is_match_cmp(source: t.Any) -> t.Callable[[t.Any], bool]: """ Curried version of :func:`is_match`. Args: source: Object of property values to match. Returns: A predicate checking whether passed :attr:`obj` is a match or not. Example: >>> is_match_cmp({"b": 2})({"a": 1, "b": 2}) True >>> is_match_cmp({"b": 3})({"a": 1, "b": 2}) False >>> is_match_cmp({"a": [{"b": [{"d": 4}]}]})({"a": [{"b": [{"c": 3, "d": 4}]}]}) True .. versionadded:: 7.1.0 """ return lambda obj: is_match(obj, source) def is_match_with( obj: t.Any, source: t.Any, customizer: t.Any = None, _key: t.Any = UNSET, _obj: t.Any = UNSET, _source: t.Any = UNSET, ) -> bool: """ This method is like :func:`is_match` except that it accepts customizer which is invoked to compare values. If customizer returns ``None``, comparisons are handled by the method instead. The customizer is invoked with five arguments: ``(obj_value, src_value, index|key, obj, source)``. Args: obj: Object to compare. source: Object of property values to match. customizer: Customizer used to compare values from `obj` and `source`. Returns: Whether `obj` is a match or not. Example: >>> is_greeting = lambda val: val in ("hello", "hi") >>> customizer = lambda ov, sv: is_greeting(ov) and is_greeting(sv) >>> obj = {"greeting": "hello"} >>> src = {"greeting": "hi"} >>> is_match_with(obj, src, customizer) True .. versionadded:: 4.0.0 """ if _obj is UNSET: _obj = obj if _source is UNSET: _source = source if not callable(customizer): def cbk(obj_value, src_value): return obj_value == src_value # no attribute `_argcount` cbk._argcount = 2 # type: ignore else: cbk = customizer if isinstance(source, (Mapping, Iterable)) and not isinstance(source, str): # Set equal to True if source is empty, otherwise, False and then allow deep comparison to # determine equality. equal = not source # Walk a/b to determine equality. 
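# Each key/value pair in `source` is looked up in `obj` with base_get and compared recursively using the same customizer; a missing key (or any lookup error) counts as a mismatch, and the walk stops at the first inequality.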
for key, value in iterator(source): try: obj_value = base_get(obj, key) equal = is_match_with(obj_value, value, cbk, _key=key, _obj=_obj, _source=_source) except Exception: equal = False if not equal: break else: equal = callit(cbk, obj, source, _key, _obj, _source) return equal def is_match_with_cmp(source: t.Any, customizer: t.Any = None) -> t.Callable[[t.Any], bool]: """ Curried version of :func:`is_match_with`. Args: source: Object of property values to match. customizer: Customizer used to compare values from `obj` and `source`. Returns: A predicate checking whether passed :attr:`obj` is a match or not. Example: >>> is_greeting = lambda val: val in ("hello", "hi") >>> customizer = lambda ov, sv: is_greeting(ov) and is_greeting(sv) >>> obj = {"greeting": "hello"} >>> src = {"greeting": "hi"} >>> is_match_with_cmp(src, customizer)(obj) True .. versionadded:: 7.1.0 """ return lambda obj: is_match_with(obj, source, customizer) def is_monotone(value: t.Union[T, t.List[T]], op: t.Callable[[T, T], t.Any]) -> bool: """ Checks if `value` is monotonic when `operator` used for comparison. Args: value: Value to check. op: Operation to used for comparison. Returns: Whether `value` is monotone. Example: >>> is_monotone([1, 1, 2, 3], operator.le) True >>> is_monotone([1, 1, 2, 3], operator.lt) False .. versionadded:: 2.0.0 """ if not is_list(value): l_value = [value] else: l_value = value # type: ignore search = ( False for x, y in zip(l_value, islice(l_value, 1, None)) if not op(x, y) # type: ignore ) return next(search, True) def is_monotone_cmp( op: t.Callable[[T, T], t.Any], ) -> t.Callable[[t.Union[T, t.List[T]]], bool]: """ Curried version of :func:`is_monotone`. Args: op: Operation to used for comparison. Returns: A predicate checking whether passed :attr:`value` is monotone. Example: >>> is_monotone_cmp(operator.le)([1, 1, 2, 3]) True >>> is_monotone_cmp(operator.lt)([1, 1, 2, 3]) False .. versionadded:: 7.1.0 """ return lambda value: is_monotone(value, op) def is_nan(value: t.Any) -> bool: """ Checks if `value` is not a number. Args: value: Value to check. Returns: Whether `value` is not a number. Example: >>> is_nan("a") True >>> is_nan(1) False >>> is_nan(1.0) False .. versionadded:: 1.0.0 """ return not is_number(value) def is_negative(value: t.Any) -> bool: """ Checks if `value` is negative. Args: value: Value to check. Returns: Whether `value` is negative. Example: >>> is_negative(-1) True >>> is_negative(0) False >>> is_negative(1) False .. versionadded:: 2.0.0 """ return is_number(value) and value < 0 def is_none(value: t.Any) -> TypeGuard[None]: """ Checks if `value` is `None`. Args: value: Value to check. Returns: Whether `value` is ``None``. Example: >>> is_none(None) True >>> is_none(False) False .. versionadded:: 1.0.0 """ return value is None def is_number(value: t.Any) -> bool: """ Checks if `value` is a number. Args: value: Value to check. Returns: Whether `value` is a number. Note: Returns ``True`` for ``int``, ``long`` (PY2), ``float``, and ``decimal.Decimal``. Example: >>> is_number(1) True >>> is_number(1.0) True >>> is_number("a") False .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0 Added ``is_num`` as alias. .. versionchanged:: 4.0.0 Removed alias ``is_num``. """ return not is_boolean(value) and isinstance(value, NUMBER_TYPES) def is_object(value: t.Any) -> bool: """ Checks if `value` is a ``list`` or ``dict``. Args: value: Value to check. Returns: Whether `value` is ``list`` or ``dict``. 
Example: >>> is_object([]) True >>> is_object({}) True >>> is_object(()) False >>> is_object(1) False .. versionadded:: 1.0.0 """ return isinstance(value, (list, dict)) def is_odd(value: t.Any) -> bool: """ Checks if `value` is odd. Args: value: Value to check. Returns: Whether `value` is odd. Example: >>> is_odd(3) True >>> is_odd(2) False >>> is_odd("a") False .. versionadded:: 2.0.0 """ return is_number(value) and value % 2 != 0 def is_positive(value: t.Any) -> bool: """ Checks if `value` is positive. Args: value: Value to check. Returns: Whether `value` is positive. Example: >>> is_positive(1) True >>> is_positive(0) False >>> is_positive(-1) False .. versionadded:: 2.0.0 """ return is_number(value) and value > 0 def is_reg_exp(value: t.Any) -> TypeGuard[re.Pattern[t.Any]]: """ Checks if `value` is a ``RegExp`` object. Args: value: Value to check. Returns: Whether `value` is a RegExp object. Example: >>> is_reg_exp(re.compile("")) True >>> is_reg_exp("") False .. versionadded:: 1.1.0 .. versionchanged:: 4.0.0 Removed alias ``is_re``. """ return isinstance(value, RegExp) def is_set(value: t.Any) -> bool: """ Checks if the given value is a set object or not. Args: value: Value passed in by the user. Returns: True if the given value is a set else False. Example: >>> is_set(set([1, 2])) True >>> is_set([1, 2, 3]) False .. versionadded:: 4.0.0 """ return isinstance(value, set) def is_strictly_decreasing( value: t.Union["SupportsRichComparison", t.List["SupportsRichComparison"]], ) -> bool: """ Check if `value` is strictly decreasing. Args: value: Value to check. Returns: Whether `value` is strictly decreasing. Example: >>> is_strictly_decreasing([4, 3, 2, 1]) True >>> is_strictly_decreasing([4, 4, 2, 1]) False .. versionadded:: 2.0.0 """ return is_monotone(value, operator.gt) # type: ignore def is_strictly_increasing( value: t.Union["SupportsRichComparison", t.List["SupportsRichComparison"]], ) -> bool: """ Check if `value` is strictly increasing. Args: value: Value to check. Returns: Whether `value` is strictly increasing. Example: >>> is_strictly_increasing([1, 2, 3, 4]) True >>> is_strictly_increasing([1, 1, 3, 4]) False .. versionadded:: 2.0.0 """ return is_monotone(value, operator.lt) # type: ignore def is_string(value: t.Any) -> TypeGuard[str]: """ Checks if `value` is a string. Args: value: Value to check. Returns: Whether `value` is a string. Example: >>> is_string("") True >>> is_string(1) False .. versionadded:: 1.0.0 """ return isinstance(value, str) def is_tuple(value: t.Any) -> bool: """ Checks if `value` is a tuple. Args: value: Value to check. Returns: Whether `value` is a tuple. Example: >>> is_tuple(()) True >>> is_tuple({}) False >>> is_tuple([]) False .. versionadded:: 3.0.0 """ return isinstance(value, tuple) def is_zero(value: t.Any) -> TypeGuard[int]: """ Checks if `value` is ``0``. Args: value: Value to check. Returns: Whether `value` is ``0``. Example: >>> is_zero(0) True >>> is_zero(1) False .. versionadded:: 2.0.0 """ return value == 0 and is_integer(value) pydash-8.0.3/src/pydash/py.typed000066400000000000000000000000001464745015500165710ustar00rootroot00000000000000pydash-8.0.3/src/pydash/strings.py000066400000000000000000001612131464745015500171530ustar00rootroot00000000000000""" String functions. .. 
versionadded:: 1.1.0 """ from __future__ import annotations import html import math import re import typing import typing as t import unicodedata from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit import pydash as pyd from .helpers import UNSET, Unset from .types import NumberT __all__ = ( "camel_case", "capitalize", "chop", "chop_right", "chars", "clean", "count_substr", "deburr", "decapitalize", "ends_with", "ensure_ends_with", "ensure_starts_with", "escape", "escape_reg_exp", "has_substr", "human_case", "insert_substr", "join", "kebab_case", "lines", "lower_case", "lower_first", "number_format", "pad", "pad_end", "pad_start", "pascal_case", "predecessor", "prune", "quote", "reg_exp_js_match", "reg_exp_js_replace", "reg_exp_replace", "repeat", "replace", "replace_end", "replace_start", "separator_case", "series_phrase", "series_phrase_serial", "slugify", "snake_case", "split", "start_case", "starts_with", "strip_tags", "substr_left", "substr_left_end", "substr_right", "substr_right_end", "successor", "surround", "swap_case", "title_case", "to_lower", "to_upper", "trim", "trim_end", "trim_start", "truncate", "unescape", "unquote", "upper_case", "upper_first", "url", "words", ) T = t.TypeVar("T") T2 = t.TypeVar("T2") class JSRegExp: """ Javascript-style regular expression pattern. Converts a Javascript-style regular expression to the equivalent Python version. """ def __init__(self, reg_exp: str) -> None: pattern, options = reg_exp[1:].rsplit("/", 1) self._global = "g" in options self._ignore_case = "i" in options flags = re.I if self._ignore_case else 0 self.pattern = re.compile(pattern, flags=flags) def find(self, text: str) -> t.List[str]: """Return list of regular expression matches.""" if self._global: results = self.pattern.findall(text) else: res = self.pattern.search(text) if res: results = [res.group()] else: results = [] return results def replace(self, text: str, repl: t.Union[str, t.Callable[[re.Match[str]], str]]) -> str: """Replace parts of text that match the regular expression.""" count = 0 if self._global else 1 return self.pattern.sub(repl, text, count=count) HTML_ESCAPES = {"&": "&", "<": "<", ">": ">", '"': """, "'": "'", "`": "`"} DEBURRED_LETTERS = { "\xc0": "A", "\xc1": "A", "\xc2": "A", "\xc3": "A", "\xc4": "A", "\xc5": "A", "\xe0": "a", "\xe1": "a", "\xe2": "a", "\xe3": "a", "\xe4": "a", "\xe5": "a", "\xc7": "C", "\xe7": "c", "\xd0": "D", "\xf0": "d", "\xc8": "E", "\xc9": "E", "\xca": "E", "\xcb": "E", "\xe8": "e", "\xe9": "e", "\xea": "e", "\xeb": "e", "\xcc": "I", "\xcd": "I", "\xce": "I", "\xcf": "I", "\xec": "i", "\xed": "i", "\xee": "i", "\xef": "i", "\xd1": "N", "\xf1": "n", "\xd2": "O", "\xd3": "O", "\xd4": "O", "\xd5": "O", "\xd6": "O", "\xd8": "O", "\xf2": "o", "\xf3": "o", "\xf4": "o", "\xf5": "o", "\xf6": "o", "\xf8": "o", "\xd9": "U", "\xda": "U", "\xdb": "U", "\xdc": "U", "\xf9": "u", "\xfa": "u", "\xfb": "u", "\xfc": "u", "\xdd": "Y", "\xfd": "y", "\xff": "y", "\xc6": "Ae", "\xe6": "ae", "\xde": "Th", "\xfe": "th", "\xdf": "ss", "\xd7": " ", "\xf7": " ", } # Use Javascript style regex to make Lo-Dash compatibility easier. # Lodash Regex definitions: https://github.com/lodash/lodash/blob/master/.internal/unicodeWords.js # References: https://github.com/lodash/lodash/blob/master/words.js#L8 RS_ASCII_WORDS = "/[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g" RS_LATIN1 = "/[\xc0-\xff]/g" # Used to compose unicode character classes. 
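# NOTE: these escaped ranges are interpolated into the Javascript-style patterns below (e.g. RS_UNICODE_WORDS), which are then compiled with JSRegExp.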
RS_ASTRAL_RANGE = "\\ud800-\\udfff" RS_COMBO_MARKS_RANGE = "\\u0300-\\u036f" RE_COMBO_HALF_MARKS_RANGE = "\\ufe20-\\ufe2f" RS_COMBO_SYMBOLS_RANGE = "\\u20d0-\\u20ff" RS_COMBO_MARKS_EXTENDED_RANGE = "\\u1ab0-\\u1aff" RS_COMBO_MARKS_SUPPLEMENT_RANGE = "\\u1dc0-\\u1dff" RS_COMBO_RANGE = ( RS_COMBO_MARKS_RANGE + RE_COMBO_HALF_MARKS_RANGE + RS_COMBO_SYMBOLS_RANGE + RS_COMBO_MARKS_EXTENDED_RANGE + RS_COMBO_MARKS_SUPPLEMENT_RANGE ) RS_DINGBAT_RANGE = "\\u2700-\\u27bf" RS_LOWER_RANGE = "a-z\\xdf-\\xf6\\xf8-\\xff" RS_MATH_OP_RANGE = "\\xac\\xb1\\xd7\\xf7" RS_NON_CHAR_RANGE = "\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf" RS_PUNCTUATION_RANGE = "\\u2000-\\u206f" RS_SPACE_RANGE = ( " \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\" "u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\" "u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000" ) RS_UPPER_RANGE = "A-Z\\xc0-\\xd6\\xd8-\\xde" RS_VAR_RANGE = "\\ufe0e\\ufe0f" RS_BREAK_RANGE = RS_MATH_OP_RANGE + RS_NON_CHAR_RANGE + RS_PUNCTUATION_RANGE + RS_SPACE_RANGE # Used to compose unicode capture groups. RS_APOS = "['\u2019]" RS_BREAK = f"[{RS_BREAK_RANGE}]" RS_COMBO = f"[{RS_COMBO_RANGE}]" RS_DIGIT = "\\d" RS_DINGBAT = f"[{RS_DINGBAT_RANGE}]" RS_LOWER = f"[{RS_LOWER_RANGE}]" RS_MISC = ( f"[^{RS_ASTRAL_RANGE}{RS_BREAK_RANGE}{RS_DIGIT}" f"{RS_DINGBAT_RANGE}{RS_LOWER_RANGE}{RS_UPPER_RANGE}]" ) RS_FITZ = "\\ud83c[\\udffb-\\udfff]" RS_MODIFIER = f"(?:{RS_COMBO}|{RS_FITZ})" RS_NON_ASTRAL = f"[^{RS_ASTRAL_RANGE}]" RS_REGIONAL = "(?:\\ud83c[\\udde6-\\uddff]){2}" RS_SURR_PAIR = "[\\ud800-\\udbff][\\udc00-\\udfff]" RS_UPPER = f"[{RS_UPPER_RANGE}]" RS_ZWJ = "\\u200d" # Used to compose unicode regexes. RS_MISC_LOWER = f"(?:{RS_LOWER}|{RS_MISC})" RS_MISC_UPPER = f"(?:{RS_UPPER}|{RS_MISC})" RS_OPT_CONTR_LOWER = f"(?:{RS_APOS}(?:d|ll|m|re|s|t|ve))?" RS_OPT_CONTR_UPPER = f"(?:{RS_APOS}(?:D|LL|M|RE|S|T|VE))?" RE_OPT_MOD = f"{RS_MODIFIER}?" RS_OPT_VAR = f"[{RS_VAR_RANGE}]?" RS_OPT_JOIN = ( f"(?:{RS_ZWJ}(?:{RS_NON_ASTRAL}|{RS_REGIONAL}|{RS_SURR_PAIR}){RS_OPT_VAR}{RE_OPT_MOD})*" ) RS_ORD_LOWER = "\\d*(?:1st|2nd|3rd|(?![123])\\dth)(?=\\b|[A-Z_])" RS_ORD_UPPER = "\\d*(?:1ST|2ND|3RD|(?![123])\\dTH)(?=\\b|[a-z_])" RS_SEQ = RS_OPT_VAR + RE_OPT_MOD + RS_OPT_JOIN RS_EMOJI = f"(?:{RS_DINGBAT}|{RS_REGIONAL}|{RS_SURR_PAIR}){RS_SEQ}" RS_HAS_UNICODE_WORD = "[a-z][A-Z]|[A-Z]{2}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]" RS_UNICODE_WORDS = ( f"/" f"{RS_UPPER}?{RS_LOWER}+{RS_OPT_CONTR_LOWER}(?={RS_BREAK}|{RS_UPPER}|$)" f"|{RS_MISC_UPPER}+{RS_OPT_CONTR_UPPER}(?={RS_BREAK}|{RS_UPPER}{RS_MISC_LOWER}|$)" f"|{RS_UPPER}?{RS_MISC_LOWER}+{RS_OPT_CONTR_LOWER}" f"|{RS_UPPER}+{RS_OPT_CONTR_UPPER}" f"|{RS_ORD_UPPER}" f"|{RS_ORD_LOWER}" f"|{RS_DIGIT}+" f"|{RS_EMOJI}" f"/g" ) # Compiled regexes for use in functions. JS_RE_ASCII_WORDS = JSRegExp(RS_ASCII_WORDS) JS_RE_UNICODE_WORDS = JSRegExp(RS_UNICODE_WORDS) JS_RE_LATIN1 = JSRegExp(RS_LATIN1) RE_HAS_UNICODE_WORD = re.compile(RS_HAS_UNICODE_WORD) RE_APOS = re.compile(RS_APOS) RE_HTML_TAGS = re.compile(r"<\/?[^>]+>") def camel_case(text: t.Any) -> str: """ Converts `text` to camel case. Args: text: String to convert. Returns: String converted to camel case. Example: >>> camel_case("FOO BAR_bAz") 'fooBarBAz' .. versionadded:: 1.1.0 .. versionchanged:: 5.0.0 Improved unicode word support. """ text = "".join(word.title() for word in compounder(text)) return text[:1].lower() + text[1:] def capitalize(text: t.Any, strict: bool = True) -> str: """ Capitalizes the first character of `text`. Args: text: String to capitalize. 
strict: Whether to cast rest of string to lower case. Defaults to ``True``. Returns: Capitalized string. Example: >>> capitalize("once upon a TIME") 'Once upon a time' >>> capitalize("once upon a TIME", False) 'Once upon a TIME' .. versionadded:: 1.1.0 .. versionchanged:: 3.0.0 Added `strict` option. """ text = pyd.to_string(text) return text.capitalize() if strict else text[:1].upper() + text[1:] def chars(text: t.Any) -> t.List[str]: """ Split `text` into a list of single characters. Args: text: String to split up. Returns: List of individual characters. Example: >>> chars("onetwo") ['o', 'n', 'e', 't', 'w', 'o'] .. versionadded:: 3.0.0 """ return list(pyd.to_string(text)) def chop(text: t.Any, step: int) -> t.List[str]: """ Break up `text` into intervals of length `step`. Args: text: String to chop. step: Interval to chop `text`. Returns: List of chopped characters. If `text` is `None` an empty list is returned. Example: >>> chop("abcdefg", 3) ['abc', 'def', 'g'] .. versionadded:: 3.0.0 """ if text is None: return [] text = pyd.to_string(text) if step <= 0: chopped = [text] else: chopped = [text[i : i + step] for i in range(0, len(text), step)] return chopped def chop_right(text: t.Any, step: int) -> t.List[str]: """ Like :func:`chop` except `text` is chopped from right. Args: text: String to chop. step: Interval to chop `text`. Returns: List of chopped characters. Example: >>> chop_right("abcdefg", 3) ['a', 'bcd', 'efg'] .. versionadded:: 3.0.0 """ if text is None: return [] text = pyd.to_string(text) if step <= 0: chopped = [text] else: text_len = len(text) chopped = [text[-(i + step) : text_len - i] for i in range(0, text_len, step)][::-1] return chopped def clean(text: t.Any) -> str: """ Trim and replace multiple spaces with a single space. Args: text: String to clean. Returns: Cleaned string. Example: >>> clean("a b c d") 'a b c d' .. versionadded:: 3.0.0 """ text = pyd.to_string(text) return " ".join(pyd.compact(text.split())) def count_substr(text: t.Any, subtext: t.Any) -> int: """ Count the occurrences of `subtext` in `text`. Args: text: Source string to count from. subtext: String to count. Returns: Number of occurrences of `subtext` in `text`. Example: >>> count_substr("aabbccddaabbccdd", "bc") 2 .. versionadded:: 3.0.0 """ if text is None or subtext is None: return 0 text = pyd.to_string(text) subtext = pyd.to_string(subtext) return text.count(subtext) def deburr(text: t.Any) -> str: """ Deburrs `text` by converting latin-1 supplementary letters to basic latin letters. Args: text: String to deburr. Returns: Deburred string. Example: >>> deburr("déjà vu") '... >>> "deja vu" 'deja vu' .. versionadded:: 2.0.0 """ text = pyd.to_string(text) return JS_RE_LATIN1.replace( text, lambda match: DEBURRED_LETTERS.get(match.group(), match.group()) ) def decapitalize(text: t.Any) -> str: """ Decaptitalizes the first character of `text`. Args: text: String to decapitalize. Returns: Decapitalized string. Example: >>> decapitalize("FOO BAR") 'fOO BAR' .. versionadded:: 3.0.0 """ text = pyd.to_string(text) return text[:1].lower() + text[1:] def ends_with(text: t.Any, target: t.Any, position: t.Union[int, None] = None) -> bool: """ Checks if `text` ends with a given target string. Args: text: String to check. target: String to check for. position: Position to search from. Defaults to end of `text`. Returns: Whether `text` ends with `target`. Example: >>> ends_with("abc def", "def") True >>> ends_with("abc def", 4) False .. 
versionadded:: 1.1.0 """ target = pyd.to_string(target) text = pyd.to_string(text) if position is None: position = len(text) return text[:position].endswith(target) def ensure_ends_with(text: t.Any, suffix: t.Any) -> str: """ Append a given suffix to a string, but only if the source string does not end with that suffix. Args: text: Source string to append `suffix` to. suffix: String to append to the source string if the source string does not end with `suffix`. Returns: source string possibly extended by `suffix`. Example: >>> ensure_ends_with("foo bar", "!") 'foo bar!' >>> ensure_ends_with("foo bar!", "!") 'foo bar!' .. versionadded:: 2.4.0 """ text = pyd.to_string(text) suffix = pyd.to_string(suffix) if text.endswith(suffix): return text return f"{text}{suffix}" def ensure_starts_with(text: t.Any, prefix: t.Any) -> str: """ Prepend a given prefix to a string, but only if the source string does not start with that prefix. Args: text: Source string to prepend `prefix` to. prefix: String to prepend to the source string if the source string does not start with `prefix`. Returns: source string possibly prefixed by `prefix` Example: >>> ensure_starts_with("foo bar", "Oh my! ") 'Oh my! foo bar' >>> ensure_starts_with("Oh my! foo bar", "Oh my! ") 'Oh my! foo bar' .. versionadded:: 2.4.0 """ text = pyd.to_string(text) prefix = pyd.to_string(prefix) if text.startswith(prefix): return text return f"{prefix}{text}" def escape(text: t.Any) -> str: r""" Converts the characters ``&``, ``<``, ``>``, ``"``, ``'``, and ``\``` in `text` to their corresponding HTML entities. Args: text: String to escape. Returns: HTML escaped string. Example: >>> escape('"1 > 2 && 3 < 4"') '"1 > 2 && 3 < 4"' .. versionadded:: 1.0.0 .. versionchanged:: 1.1.0 Moved function to :mod:`pydash.strings`. """ text = pyd.to_string(text) # NOTE: Not using html.escape because Lo-Dash escapes certain chars differently (e.g. `'` isn't # escaped by html.escape() but is by Lo-Dash). return "".join(HTML_ESCAPES.get(char, char) for char in text) def escape_reg_exp(text: t.Any) -> str: """ Escapes the RegExp special characters in `text`. Args: text: String to escape. Returns: RegExp escaped string. Example: >>> escape_reg_exp("[()]") '\\\\[\\\\(\\\\)\\\\]' .. versionadded:: 1.1.0 .. versionchanged:: 4.0.0 Removed alias ``escape_re`` """ text = pyd.to_string(text) return re.escape(text) def has_substr(text: t.Any, subtext: t.Any) -> bool: """ Returns whether `subtext` is included in `text`. Args: text: String to search. subtext: String to search for. Returns: Whether `subtext` is found in `text`. Example: >>> has_substr("abcdef", "bc") True >>> has_substr("abcdef", "bb") False .. versionadded:: 3.0.0 """ text = pyd.to_string(text) subtext = pyd.to_string(subtext) return text.find(subtext) >= 0 def human_case(text: t.Any) -> str: """ Converts `text` to human case which has only the first letter capitalized and each word separated by a space. Args: text: String to convert. Returns: String converted to human case. Example: >>> human_case("abc-def_hij lmn") 'Abc def hij lmn' >>> human_case("user_id") 'User' .. versionadded:: 3.0.0 .. versionchanged:: 5.0.0 Improved unicode word support. """ return ( pyd.chain(text) .snake_case() .reg_exp_replace("_id$", "") .replace("_", " ") .capitalize() .value() ) def insert_substr(text: t.Any, index: int, subtext: t.Any) -> str: """ Insert `subtext` in `text` starting at position `index`. Args: text: String to add substring to. index: String index to insert into. subtext: String to insert. 
Returns: Modified string. Example: >>> insert_substr("abcdef", 3, "--") 'abc--def' .. versionadded:: 3.0.0 """ text = pyd.to_string(text) subtext = pyd.to_string(subtext) return text[:index] + subtext + text[index:] def join(array: t.Iterable[t.Any], separator: t.Any = "") -> str: """ Joins an iterable into a string using `separator` between each element. Args: array: Iterable to implode. separator: Separator to using when joining. Defaults to ``''``. Returns: Joined string. Example: >>> join(["a", "b", "c"]) == "abc" True >>> join([1, 2, 3, 4], "&") == "1&2&3&4" True >>> join("abcdef", "-") == "a-b-c-d-e-f" True .. versionadded:: 2.0.0 .. versionchanged:: 4.0.0 Removed alias ``implode``. """ return pyd.to_string(separator).join(pyd.map_(array or (), pyd.to_string)) def kebab_case(text: t.Any) -> str: """ Converts `text` to kebab case (a.k.a. spinal case). Args: text: String to convert. Returns: String converted to kebab case. Example: >>> kebab_case("a b c_d-e!f") 'a-b-c-d-e-f' .. versionadded:: 1.1.0 .. versionchanged:: 5.0.0 Improved unicode word support. """ return "-".join(word.lower() for word in compounder(text) if word) def lines(text: t.Any) -> t.List[str]: r""" Split lines in `text` into an array. Args: text: String to split. Returns: String split by lines. Example: >>> lines("a\nb\r\nc") ['a', 'b', 'c'] .. versionadded:: 3.0.0 """ text = pyd.to_string(text) return text.splitlines() def lower_case(text: t.Any) -> str: """ Converts string to lower case as space separated words. Args: text: String to convert. Returns: String converted to lower case as space separated words. Example: >>> lower_case("fooBar") 'foo bar' >>> lower_case("--foo-Bar--") 'foo bar' >>> lower_case('/?*Foo10/;"B*Ar') 'foo 10 b ar' .. versionadded:: 4.0.0 .. versionchanged:: 5.0.0 Improved unicode word support. """ return " ".join(compounder(text)).lower() def lower_first(text: str) -> str: """ Converts the first character of string to lower case. Args: text: String passed in by the user. Returns: String in which the first character is converted to lower case. Example: >>> lower_first("FRED") 'fRED' >>> lower_first("Foo Bar") 'foo Bar' >>> lower_first("1foobar") '1foobar' >>> lower_first(";foobar") ';foobar' .. versionadded:: 4.0.0 """ return text[:1].lower() + text[1:] def number_format( number: NumberT, scale: int = 0, decimal_separator: str = ".", order_separator: str = "," ) -> str: """ Format a number to scale with custom decimal and order separators. Args: number: Number to format. scale: Number of decimals to include. Defaults to ``0``. decimal_separator: Decimal separator to use. Defaults to ``'.'``. order_separator: Order separator to use. Defaults to ``','``. Returns: Number formatted as string. Example: >>> number_format(1234.5678) '1,235' >>> number_format(1234.5678, 2, ",", ".") '1.234,57' .. versionadded:: 3.0.0 """ # Create a string formatter which converts number to the appropriately scaled representation. fmt = f"{{0:.{scale:d}f}}" try: num_parts = fmt.format(number).split(".") except ValueError: text = "" else: int_part = num_parts[0] dec_part = (num_parts + [""])[1] # Reverse the integer part, chop it into groups of 3, join on `order_separator`, and then # un-reverse the string. int_part = order_separator.join(chop(int_part[::-1], 3))[::-1] text = decimal_separator.join(pyd.compact([int_part, dec_part])) return text def pad(text: t.Any, length: int, chars: t.Any = " ") -> str: """ Pads `text` on the left and right sides if it is shorter than the given padding length. 
The `chars` string may be truncated if the number of padding characters can't be evenly divided by the padding length. Args: text: String to pad. length: Amount to pad. chars: Characters to pad with. Defaults to ``" "``. Returns: Padded string. Example: >>> pad("abc", 5) ' abc ' >>> pad("abc", 6, "x") 'xabcxx' >>> pad("abc", 5, "...") '.abc.' .. versionadded:: 1.1.0 .. versionchanged:: 3.0.0 Fix handling of multiple `chars` so that padded string isn't over padded. """ # pylint: disable=redefined-outer-name text = pyd.to_string(text) text_len = len(text) if text_len >= length: return text mid = (length - text_len) / 2.0 left_len = int(math.floor(mid)) right_len = int(math.ceil(mid)) chars = pad_end("", right_len, chars) return chars[:left_len] + text + chars def pad_end(text: t.Any, length: int, chars: t.Any = " ") -> str: """ Pads `text` on the right side if it is shorter than the given padding length. The `chars` string may be truncated if the number of padding characters can't be evenly divided by the padding length. Args: text: String to pad. length: Amount to pad. chars: Characters to pad with. Defaults to ``" "``. Returns: Padded string. Example: >>> pad_end("abc", 5) 'abc ' >>> pad_end("abc", 5, ".") 'abc..' .. versionadded:: 1.1.0 .. versionchanged:: 4.0.0 Renamed from ``pad_right`` to ``pad_end``. """ # pylint: disable=redefined-outer-name text = pyd.to_string(text) length = max((length, len(text))) return (text + repeat(chars, length))[:length] def pad_start(text: t.Any, length: int, chars: t.Any = " ") -> str: """ Pads `text` on the left side if it is shorter than the given padding length. The `chars` string may be truncated if the number of padding characters can't be evenly divided by the padding length. Args: text: String to pad. length: Amount to pad. chars: Characters to pad with. Defaults to ``" "``. Returns: Padded string. Example: >>> pad_start("abc", 5) ' abc' >>> pad_start("abc", 5, ".") '..abc' .. versionadded:: 1.1.0 .. versionchanged:: 4.0.0 Renamed from ``pad_left`` to ``pad_start``. """ # pylint: disable=redefined-outer-name text = pyd.to_string(text) length = max(length, len(text)) return (repeat(chars, length) + text)[-length:] def pascal_case(text: t.Any, strict: bool = True) -> str: """ Like :func:`camel_case` except the first letter is capitalized. Args: text: String to convert. strict: Whether to cast rest of string to lower case. Defaults to ``True``. Returns: String converted to class case. Example: >>> pascal_case("FOO BAR_bAz") 'FooBarBaz' >>> pascal_case("FOO BAR_bAz", False) 'FooBarBAz' .. versionadded:: 3.0.0 .. versionchanged:: 5.0.0 Improved unicode word support. """ text = pyd.to_string(text) if strict: text = text.lower() return capitalize(camel_case(text), strict=False) def predecessor(char: t.Any) -> str: """ Return the predecessor character of `char`. Args: char: Character to find the predecessor of. Returns: Predecessor character. Example: >>> predecessor("c") 'b' >>> predecessor("C") 'B' >>> predecessor("3") '2' .. versionadded:: 3.0.0 """ char = pyd.to_string(char) return chr(ord(char) - 1) def prune(text: t.Any, length: int = 0, omission: str = "...") -> str: """ Like :func:`truncate` except it ensures that the pruned string doesn't exceed the original length, i.e., it avoids half-chopped words when truncating. If the pruned text + `omission` text is longer than the original text, then the original text is returned. Args: text: String to prune. length: Target prune length. Defaults to ``0``. 
omission: Omission text to append to the end of the pruned string. Defaults to ``'...'``. Returns: Pruned string. Example: >>> prune("Fe fi fo fum", 5) 'Fe fi...' >>> prune("Fe fi fo fum", 6) 'Fe fi...' >>> prune("Fe fi fo fum", 7) 'Fe fi...' >>> prune("Fe fi fo fum", 8, ",,,") 'Fe fi fo,,,' .. versionadded:: 3.0.0 """ text = pyd.to_string(text) text_len = len(text) omission_len = len(omission) if text_len <= length: return text # Replace non-alphanumeric chars with whitespace. def repl(match): char = match.group(0) return " " if char.upper() == char.lower() else char subtext = reg_exp_replace(text[: length + 1], r".(?=\W*\w*$)", repl) if re.match(r"\w\w", subtext[-2:]): # Last two characters are alphanumeric. Remove last "word" from end of string so that we # prune to the next whole word. subtext = reg_exp_replace(subtext, r"\s*\S+$", "") else: # Last character (at least) is whitespace. So remove that character as well as any other # whitespace. subtext = subtext[:-1].rstrip() subtext_len = len(subtext) # Only add omission text if doing so will result in a string that is equal to or smaller than # the original. if (subtext_len + omission_len) <= text_len: text = text[:subtext_len] + omission return text def quote(text: t.Any, quote_char: t.Any = '"') -> str: """ Quote a string with another string. Args: text: String to be quoted. quote_char: the quote character. Defaults to ``'"'``. Returns: the quoted string. Example: >>> quote("To be or not to be") '"To be or not to be"' >>> quote("To be or not to be", "'") "'To be or not to be'" .. versionadded:: 2.4.0 """ return surround(text, quote_char) def reg_exp_js_match(text: t.Any, reg_exp: str) -> t.List[str]: """ Return list of matches using Javascript style regular expression. Args: text: String to evaluate. reg_exp: Javascript style regular expression. Returns: List of matches. Example: >>> reg_exp_js_match("aaBBcc", "/bb/") [] >>> reg_exp_js_match("aaBBcc", "/bb/i") ['BB'] >>> reg_exp_js_match("aaBBccbb", "/bb/i") ['BB'] >>> reg_exp_js_match("aaBBccbb", "/bb/gi") ['BB', 'bb'] .. versionadded:: 2.0.0 .. versionchanged:: 3.0.0 Reordered arguments to make `text` first. .. versionchanged:: 4.0.0 Renamed from ``js_match`` to ``reg_exp_js_match``. """ text = pyd.to_string(text) return JSRegExp(reg_exp).find(text) def reg_exp_js_replace( text: t.Any, reg_exp: str, repl: t.Union[str, t.Callable[[re.Match[str]], str]] ) -> str: """ Replace `text` with `repl` using Javascript style regular expression to find matches. Args: text: String to evaluate. reg_exp: Javascript style regular expression. repl: Replacement string or callable. Returns: Modified string. Example: >>> reg_exp_js_replace("aaBBcc", "/bb/", "X") 'aaBBcc' >>> reg_exp_js_replace("aaBBcc", "/bb/i", "X") 'aaXcc' >>> reg_exp_js_replace("aaBBccbb", "/bb/i", "X") 'aaXccbb' >>> reg_exp_js_replace("aaBBccbb", "/bb/gi", "X") 'aaXccX' .. versionadded:: 2.0.0 .. versionchanged:: 3.0.0 Reordered arguments to make `text` first. .. versionchanged:: 4.0.0 Renamed from ``js_replace`` to ``reg_exp_js_replace``. """ text = pyd.to_string(text) if not pyd.is_function(repl): repl = pyd.to_string(repl) return JSRegExp(reg_exp).replace(text, repl) def reg_exp_replace( text: t.Any, pattern: t.Any, repl: t.Union[str, t.Callable[[re.Match[str]], str]], ignore_case: bool = False, count: int = 0, ) -> str: """ Replace occurrences of regex `pattern` with `repl` in `text`. Optionally, ignore case when replacing. Optionally, set `count` to limit number of replacements. Args: text: String to replace. 
pattern: Pattern to find and replace. repl: String to substitute `pattern` with. ignore_case: Whether to ignore case when replacing. Defaults to ``False``. count: Maximum number of occurrences to replace. Defaults to ``0`` which replaces all. Returns: Replaced string. Example: >>> reg_exp_replace("aabbcc", "b", "X") 'aaXXcc' >>> reg_exp_replace("aabbcc", "B", "X", ignore_case=True) 'aaXXcc' >>> reg_exp_replace("aabbcc", "b", "X", count=1) 'aaXbcc' >>> reg_exp_replace("aabbcc", "[ab]", "X") 'XXXXcc' .. versionadded:: 3.0.0 .. versionchanged:: 4.0.0 Renamed from ``re_replace`` to ``reg_exp_replace``. """ if pattern is None: return pyd.to_string(text) return replace(text, pattern, repl, ignore_case=ignore_case, count=count, escape=False) def repeat(text: t.Any, n: t.SupportsInt = 0) -> str: """ Repeats the given string `n` times. Args: text: String to repeat. n: Number of times to repeat the string. Returns: Repeated string. Example: >>> repeat(".", 5) '.....' .. versionadded:: 1.1.0 """ return pyd.to_string(text) * int(n) def replace( text: t.Any, pattern: t.Any, repl: t.Union[str, t.Callable[[re.Match[str]], str]], ignore_case: bool = False, count: int = 0, escape: bool = True, from_start: bool = False, from_end: bool = False, ) -> str: """ Replace occurrences of `pattern` with `repl` in `text`. Optionally, ignore case when replacing. Optionally, set `count` to limit number of replacements. Args: text: String to replace. pattern: Pattern to find and replace. repl: String to substitute `pattern` with. ignore_case: Whether to ignore case when replacing. Defaults to ``False``. count: Maximum number of occurrences to replace. Defaults to ``0`` which replaces all. escape: Whether to escape `pattern` when searching. This is needed if a literal replacement is desired when `pattern` may contain special regular expression characters. Defaults to ``True``. from_start: Whether to limit replacement to start of string. from_end: Whether to limit replacement to end of string. Returns: Replaced string. Example: >>> replace("aabbcc", "b", "X") 'aaXXcc' >>> replace("aabbcc", "B", "X", ignore_case=True) 'aaXXcc' >>> replace("aabbcc", "b", "X", count=1) 'aaXbcc' >>> replace("aabbcc", "[ab]", "X") 'aabbcc' >>> replace("aabbcc", "[ab]", "X", escape=False) 'XXXXcc' .. versionadded:: 3.0.0 .. versionchanged:: 4.1.0 Added ``from_start`` and ``from_end`` arguments. .. versionchanged:: 5.0.0 Added support for ``pattern`` as ``typing.Pattern`` object. """ text = pyd.to_string(text) if pattern is None: return text if not pyd.is_function(repl): repl = pyd.to_string(repl) flags = re.IGNORECASE if ignore_case else 0 if isinstance(pattern, typing.Pattern): pat = pattern else: pattern = pyd.to_string(pattern) if escape: pattern = re.escape(pattern) if from_start and not pattern.startswith("^"): pattern = "^" + pattern if from_end and not pattern.endswith("$"): pattern += "$" pat = re.compile(pattern, flags=flags) return pat.sub(repl, text, count=count) def replace_end( text: t.Any, pattern: t.Any, repl: t.Union[str, t.Callable[[re.Match[str]], str]], ignore_case: bool = False, escape: bool = True, ) -> str: """ Like :func:`replace` except it only replaces `text` with `repl` if `pattern` mathces the end of `text`. Args: text: String to replace. pattern: Pattern to find and replace. repl: String to substitute `pattern` with. ignore_case: Whether to ignore case when replacing. Defaults to ``False``. escape: Whether to escape `pattern` when searching. 
This is needed if a literal replacement is desired when `pattern` may contain special regular expression characters. Defaults to ``True``. Returns: Replaced string. Example: >>> replace_end("aabbcc", "b", "X") 'aabbcc' >>> replace_end("aabbcc", "c", "X") 'aabbcX' .. versionadded:: 4.1.0 """ return replace(text, pattern, repl, ignore_case=ignore_case, escape=escape, from_end=True) def replace_start( text: t.Any, pattern: t.Any, repl: t.Union[str, t.Callable[[re.Match[str]], str]], ignore_case: bool = False, escape: bool = True, ) -> str: """ Like :func:`replace` except it only replaces `text` with `repl` if `pattern` mathces the start of `text`. Args: text: String to replace. pattern: Pattern to find and replace. repl: String to substitute `pattern` with. ignore_case: Whether to ignore case when replacing. Defaults to ``False``. escape: Whether to escape `pattern` when searching. This is needed if a literal replacement is desired when `pattern` may contain special regular expression characters. Defaults to ``True``. Returns: Replaced string. Example: >>> replace_start("aabbcc", "b", "X") 'aabbcc' >>> replace_start("aabbcc", "a", "X") 'Xabbcc' .. versionadded:: 4.1.0 """ return replace(text, pattern, repl, ignore_case=ignore_case, escape=escape, from_start=True) def separator_case(text: t.Any, separator: str) -> str: """ Splits `text` on words and joins with `separator`. Args: text: String to convert. separator: Separator to join words with. Returns: Converted string. Example: >>> separator_case("a!!b___c.d", "-") 'a-b-c-d' .. versionadded:: 3.0.0 .. versionchanged:: 5.0.0 Improved unicode word support. """ return separator.join(word.lower() for word in words(text) if word) def series_phrase( items: t.List[t.Any], separator: t.Any = ", ", last_separator: t.Any = " and ", serial: bool = False, ) -> str: """ Join items into a grammatical series phrase, e.g., ``"item1, item2, item3 and item4"``. Args: items: List of string items to join. separator: Item separator. Defaults to ``', '``. last_separator: Last item separator. Defaults to ``' and '``. serial: Whether to include `separator` with `last_separator` when number of items is greater than 2. Defaults to ``False``. Returns: Joined string. Example: >>> series_phrase(["apples", "bananas", "peaches"]) 'apples, bananas and peaches' >>> series_phrase(["apples", "bananas", "peaches"], serial=True) 'apples, bananas, and peaches' >>> series_phrase(["apples", "bananas", "peaches"], "; ", ", or ") 'apples; bananas, or peaches' .. versionadded:: 3.0.0 """ items = pyd.chain(items).map(pyd.to_string).compact().value() item_count = len(items) separator = pyd.to_string(separator) last_separator = pyd.to_string(last_separator) if item_count > 2 and serial: last_separator = separator.rstrip() + last_separator if item_count >= 2: items = items[:-2] + [last_separator.join(items[-2:])] return separator.join(items) def series_phrase_serial( items: t.List[t.Any], separator: t.Any = ", ", last_separator: t.Any = " and " ) -> str: """ Join items into a grammatical series phrase using a serial separator, e.g., ``"item1, item2, item3, and item4"``. Args: items: List of string items to join. separator: Item separator. Defaults to ``', '``. last_separator: Last item separator. Defaults to ``' and '``. Returns: Joined string. Example: >>> series_phrase_serial(["apples", "bananas", "peaches"]) 'apples, bananas, and peaches' .. 
versionadded:: 3.0.0 """ return series_phrase(items, separator, last_separator, serial=True) def slugify(text: t.Any, separator: str = "-") -> str: """ Convert `text` into an ASCII slug which can be used safely in URLs. Incoming `text` is converted to unicode and noramlzied using the ``NFKD`` form. This results in some accented characters being converted to their ASCII "equivalent" (e.g. ``é`` is converted to ``e``). Leading and trailing whitespace is trimmed and any remaining whitespace or other special characters without an ASCII equivalent are replaced with ``-``. Args: text: String to slugify. separator: Separator to use. Defaults to ``'-'``. Returns: Slugified string. Example: >>> slugify("This is a slug.") == "this-is-a-slug" True >>> slugify("This is a slug.", "+") == "this+is+a+slug" True .. versionadded:: 3.0.0 .. versionchanged:: 5.0.0 Improved unicode word support. .. versionchanged:: 7.0.0 Remove single quotes from output. """ normalized = ( unicodedata.normalize("NFKD", pyd.to_string(text)) .encode("ascii", "ignore") .decode("utf8") .replace("'", "") ) return separator_case(normalized, separator) def snake_case(text: t.Any) -> str: """ Converts `text` to snake case. Args: text: String to convert. Returns: String converted to snake case. Example: >>> snake_case("This is Snake Case!") 'this_is_snake_case' .. versionadded:: 1.1.0 .. versionchanged:: 4.0.0 Removed alias ``underscore_case``. .. versionchanged:: 5.0.0 Improved unicode word support. """ return "_".join(word.lower() for word in compounder(text) if word) def split(text: t.Any, separator: t.Union[str, Unset, None] = UNSET) -> t.List[str]: """ Splits `text` on `separator`. If `separator` not provided, then `text` is split on whitespace. If `separator` is falsey, then `text` is split on every character. Args: text: String to explode. separator: Separator string to split on. Defaults to ``NoValue``. Returns: Split string. Example: >>> split("one potato, two potatoes, three potatoes, four!") ['one', 'potato,', 'two', 'potatoes,', 'three', 'potatoes,', 'four!'] >>> split("one potato, two potatoes, three potatoes, four!", ",") ['one potato', ' two potatoes', ' three potatoes', ' four!'] .. versionadded:: 2.0.0 .. versionchanged:: 3.0.0 Changed `separator` default to ``NoValue`` and supported splitting on whitespace by default. .. versionchanged:: 4.0.0 Removed alias ``explode``. """ text = pyd.to_string(text) if separator is UNSET: ret = text.split() elif separator: ret = text.split(separator) else: ret = chars(text) return ret def start_case(text: t.Any) -> str: """ Convert `text` to start case. Args: text: String to convert. Returns: String converted to start case. Example: >>> start_case("fooBar") 'Foo Bar' .. versionadded:: 3.1.0 .. versionchanged:: 5.0.0 Improved unicode word support. """ return " ".join(capitalize(word, strict=False) for word in compounder(text)) def starts_with(text: t.Any, target: t.Any, position: int = 0) -> bool: """ Checks if `text` starts with a given target string. Args: text: String to check. target: String to check for. position: Position to search from. Defaults to beginning of `text`. Returns: Whether `text` starts with `target`. Example: >>> starts_with("abcdef", "a") True >>> starts_with("abcdef", "b") False >>> starts_with("abcdef", "a", 1) False .. versionadded:: 1.1.0 """ text = pyd.to_string(text) target = pyd.to_string(target) return text[position:].startswith(target) def strip_tags(text: t.Any) -> str: """ Removes all HTML tags from `text`. Args: text: String to strip. 
Returns: String without HTML tags. Example: >>> strip_tags('Some link') 'Some link' .. versionadded:: 3.0.0 """ return RE_HTML_TAGS.sub("", pyd.to_string(text)) def substr_left(text: t.Any, subtext: str) -> str: """ Searches `text` from left-to-right for `subtext` and returns a substring consisting of the characters in `text` that are to the left of `subtext` or all string if no match found. Args: text: String to partition. subtext: String to search for. Returns: Substring to left of `subtext`. Example: >>> substr_left("abcdefcdg", "cd") 'ab' .. versionadded:: 3.0.0 """ text = pyd.to_string(text) return text.partition(subtext)[0] if subtext else text def substr_left_end(text: t.Any, subtext: str) -> str: """ Searches `text` from right-to-left for `subtext` and returns a substring consisting of the characters in `text` that are to the left of `subtext` or all string if no match found. Args: text: String to partition. subtext: String to search for. Returns: Substring to left of `subtext`. Example: >>> substr_left_end("abcdefcdg", "cd") 'abcdef' .. versionadded:: 3.0.0 """ text = pyd.to_string(text) return text.rpartition(subtext)[0] or text if subtext else text def substr_right(text: t.Any, subtext: str) -> str: """ Searches `text` from right-to-left for `subtext` and returns a substring consisting of the characters in `text` that are to the right of `subtext` or all string if no match found. Args: text: String to partition. subtext: String to search for. Returns: Substring to right of `subtext`. Example: >>> substr_right("abcdefcdg", "cd") 'efcdg' .. versionadded:: 3.0.0 """ text = pyd.to_string(text) return text.partition(subtext)[2] or text if subtext else text def substr_right_end(text: t.Any, subtext: str) -> str: """ Searches `text` from left-to-right for `subtext` and returns a substring consisting of the characters in `text` that are to the right of `subtext` or all string if no match found. Args: text: String to partition. subtext: String to search for. Returns: Substring to right of `subtext`. Example: >>> substr_right_end("abcdefcdg", "cd") 'g' .. versionadded:: 3.0.0 """ text = pyd.to_string(text) return text.rpartition(subtext)[2] if subtext else text def successor(char: t.Any) -> str: """ Return the successor character of `char`. Args: char: Character to find the successor of. Returns: Successor character. Example: >>> successor("b") 'c' >>> successor("B") 'C' >>> successor("2") '3' .. versionadded:: 3.0.0 """ char = pyd.to_string(char) return chr(ord(char) + 1) def surround(text: t.Any, wrapper: t.Any) -> str: """ Surround a string with another string. Args: text: String to surround with `wrapper`. wrapper: String by which `text` is to be surrounded. Returns: Surrounded string. Example: >>> surround("abc", '"') '"abc"' >>> surround("abc", "!") '!abc!' .. versionadded:: 2.4.0 """ text = pyd.to_string(text) wrapper = pyd.to_string(wrapper) return f"{wrapper}{text}{wrapper}" def swap_case(text: t.Any) -> str: """ Swap case of `text` characters. Args: text: String to swap case. Returns: String with swapped case. Example: >>> swap_case("aBcDeF") 'AbCdEf' .. versionadded:: 3.0.0 """ text = pyd.to_string(text) return text.swapcase() def title_case(text: t.Any) -> str: """ Convert `text` to title case. Args: text: String to convert. Returns: String converted to title case. Example: >>> title_case("bob's shop") "Bob's Shop" .. versionadded:: 3.0.0 """ text = pyd.to_string(text) # NOTE: Can't use text.title() since it doesn't handle apostrophes. 
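    #   For example, "bob's shop".title() gives "Bob'S Shop" because str.title()
    #   treats the apostrophe as a word boundary, whereas capitalizing each
    #   space-separated word keeps the expected "Bob's Shop".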
return " ".join(word.capitalize() for word in re.split(" ", text)) def to_lower(text: t.Any) -> str: """ Converts the given :attr:`text` to lower text. Args: text: String to convert. Returns: String converted to lower case. Example: >>> to_lower("--Foo-Bar--") '--foo-bar--' >>> to_lower("fooBar") 'foobar' >>> to_lower("__FOO_BAR__") '__foo_bar__' .. versionadded:: 4.0.0 """ return pyd.to_string(text).lower() def to_upper(text: t.Any) -> str: """ Converts the given :attr:`text` to upper text. Args: text: String to convert. Returns: String converted to upper case. Example: >>> to_upper("--Foo-Bar--") '--FOO-BAR--' >>> to_upper("fooBar") 'FOOBAR' >>> to_upper("__FOO_BAR__") '__FOO_BAR__' .. versionadded:: 4.0.0 """ return pyd.to_string(text).upper() def trim(text: t.Any, chars: t.Union[str, None] = None) -> str: r""" Removes leading and trailing whitespace or specified characters from `text`. Args: text: String to trim. chars: Specific characters to remove. Returns: Trimmed string. Example: >>> trim(" abc efg\r\n ") 'abc efg' .. versionadded:: 1.1.0 """ # pylint: disable=redefined-outer-name text = pyd.to_string(text) return text.strip(chars) def trim_end(text: t.Any, chars: t.Union[str, None] = None) -> str: r""" Removes trailing whitespace or specified characters from `text`. Args: text: String to trim. chars: Specific characters to remove. Returns: Trimmed string. Example: >>> trim_end(" abc efg\r\n ") ' abc efg' .. versionadded:: 1.1.0 .. versionchanged:: 4.0.0 Renamed from ``trim_right`` to ``trim_end``. """ text = pyd.to_string(text) return text.rstrip(chars) def trim_start(text: t.Any, chars: t.Union[str, None] = None) -> str: r""" Removes leading whitespace or specified characters from `text`. Args: text: String to trim. chars: Specific characters to remove. Returns: Trimmed string. Example: >>> trim_start(" abc efg\r\n ") 'abc efg\r\n ' .. versionadded:: 1.1.0 .. versionchanged:: 4.0.0 Renamed from ``trim_left`` to ``trim_start``. """ text = pyd.to_string(text) return text.lstrip(chars) def truncate( text: t.Any, length: int = 30, omission: str = "...", separator: t.Union[str, re.Pattern[str], None] = None, ) -> str: """ Truncates `text` if it is longer than the given maximum string length. The last characters of the truncated string are replaced with the omission string which defaults to ``...``. Args: text: String to truncate. length: Maximum string length. Defaults to ``30``. omission: String to indicate text is omitted. separator: Separator pattern to truncate to. Returns: Truncated string. Example: >>> truncate("hello world", 5) 'he...' >>> truncate("hello world", 5, "..") 'hel..' >>> truncate("hello world", 10) 'hello w...' >>> truncate("hello world", 10, separator=" ") 'hello...' .. versionadded:: 1.1.0 .. versionchanged:: 4.0.0 Removed alias ``trunc``. """ text = pyd.to_string(text) if len(text) <= length: return text omission_len = len(omission) text_len = length - omission_len text = text[:text_len] trunc_len = len(text) if pyd.is_string(separator): trunc_len = text.rfind(separator) elif pyd.is_reg_exp(separator): last = None for match in separator.finditer(text): last = match if last is not None: trunc_len = last.start() return text[:trunc_len] + omission def unescape(text: t.Any) -> str: """ The inverse of :func:`escape`. This method converts the HTML entities ``&``, ``<``, ``>``, ``"``, ``'``, and ````` in `text` to their corresponding characters. Args: text: String to unescape. Returns: HTML unescaped string. 
Example: >>> results = unescape(""1 > 2 && 3 < 4"") >>> results == '"1 > 2 && 3 < 4"' True .. versionadded:: 1.0.0 .. versionchanged:: 1.1.0 Moved to :mod:`pydash.strings`. """ text = pyd.to_string(text) return html.unescape(text) def upper_case(text: t.Any) -> str: """ Converts string to upper case, as space separated words. Args: text: String to be converted to uppercase. Returns: String converted to uppercase, as space separated words. Example: >>> upper_case("--foo-bar--") 'FOO BAR' >>> upper_case("fooBar") 'FOO BAR' >>> upper_case('/?*Foo10/;"B*Ar') 'FOO 10 B AR' .. versionadded:: 4.0.0 .. versionchanged:: 5.0.0 Improved unicode word support. """ return " ".join(compounder(text)).upper() def upper_first(text: str) -> str: """ Converts the first character of string to upper case. Args: text: String passed in by the user. Returns: String in which the first character is converted to upper case. Example: >>> upper_first("fred") 'Fred' >>> upper_first("foo bar") 'Foo bar' >>> upper_first("1foobar") '1foobar' >>> upper_first(";foobar") ';foobar' .. versionadded:: 4.0.0 """ return text[:1].upper() + text[1:] def unquote(text: t.Any, quote_char: t.Any = '"') -> str: """ Unquote `text` by removing `quote_char` if `text` begins and ends with it. Args: text: String to unquote. quote_char: Quote character to remove. Defaults to `"`. Returns: Unquoted string. Example: >>> unquote('"abc"') 'abc' >>> unquote('"abc"', "#") '"abc"' >>> unquote("#abc", "#") '#abc' >>> unquote("#abc#", "#") 'abc' .. versionadded:: 3.0.0 """ text = pyd.to_string(text) inner = text[1:-1] if text == f"{quote_char}{inner}{quote_char}": text = inner return text def url(*paths: t.Any, **params: t.Any) -> str: """ Combines a series of URL paths into a single URL. Optionally, pass in keyword arguments to append query parameters. Args: paths: URL paths to combine. Keyword Args: params: Query parameters. Returns: URL string. Example: >>> link = url("a", "b", ["c", "d"], "/", q="X", y="Z") >>> path, params = link.split("?") >>> path == "a/b/c/d/" True >>> set(params.split("&")) == set(["q=X", "y=Z"]) True .. versionadded:: 2.2.0 """ # allow reassignment different type paths = pyd.chain(paths).flatten_deep().map(pyd.to_string).value() # type: ignore paths_list = [] params_list = flatten_url_params(params) for path in paths: scheme, netloc, path, query, fragment = urlsplit(path) query = parse_qsl(query) params_list += query paths_list.append(urlunsplit((scheme, netloc, path, "", fragment))) path = delimitedpathjoin("/", *paths_list) scheme, netloc, path, query, fragment = urlsplit(path) query = urlencode(params_list) return urlunsplit((scheme, netloc, path, query, fragment)) def words(text: t.Any, pattern: t.Union[str, None] = None) -> t.List[str]: """ Return list of words contained in `text`. References: https://github.com/lodash/lodash/blob/master/words.js#L30 Args: text: String to split. pattern: Custom pattern to split words on. Defaults to ``None``. Returns: List of words. Example: >>> words("a b, c; d-e") ['a', 'b', 'c', 'd', 'e'] >>> words("fred, barney, & pebbles", "/[^, ]+/g") ['fred', 'barney', '&', 'pebbles'] .. versionadded:: 2.0.0 .. versionchanged:: 3.2.0 Added `pattern` argument. .. versionchanged:: 3.2.0 Improved matching for one character words. .. versionchanged:: 5.0.0 Improved unicode word support. 
""" text = pyd.to_string(text) if pattern is None: if has_unicode_word(text): reg_exp = JS_RE_UNICODE_WORDS else: reg_exp = JS_RE_ASCII_WORDS else: reg_exp = JSRegExp(pattern) return reg_exp.find(text) # # Utility functions not a part of main API # def compounder(text): """ Remove single quote before passing into words() to match Lodash-style outputs. Required by certain functions such as kebab_case, camel_case, start_case etc. References: https://github.com/lodash/lodash/blob/4.17.15/lodash.js#L4968 """ return words(deburr(RE_APOS.sub("", pyd.to_string(text)))) def has_unicode_word(text): """ Check if the text contains unicode or requires more complex regex to handle. References: https://github.com/lodash/lodash/blob/master/words.js#L3 """ result = RE_HAS_UNICODE_WORD.search(text) return bool(result) def delimitedpathjoin(delimiter, *paths): """ Join delimited path using specified delimiter. >>> assert delimitedpathjoin(".", "") == "" >>> assert delimitedpathjoin(".", ".") == "." >>> assert delimitedpathjoin(".", ["", ".a"]) == ".a" >>> assert delimitedpathjoin(".", ["a", "."]) == "a." >>> assert delimitedpathjoin(".", ["", ".a", "", "", "b"]) == ".a.b" >>> ret = ".a.b.c.d.e." >>> assert delimitedpathjoin(".", [".a.", "b.", ".c", "d", "e."]) == ret >>> assert delimitedpathjoin(".", ["a", "b", "c"]) == "a.b.c" >>> ret = "a.b.c.d.e.f" >>> assert delimitedpathjoin(".", ["a.b", ".c.d.", ".e.f"]) == ret >>> ret = ".a.b.c.1." >>> assert delimitedpathjoin(".", ".", "a", "b", "c", 1, ".") == ret >>> assert delimitedpathjoin(".", []) == "" """ paths = [pyd.to_string(path) for path in pyd.flatten_deep(paths) if path] if len(paths) == 1: # Special case where there's no need to join anything. Doing this because if # path==[delimiter], then an extra delimiter would be added if the else clause ran instead. path = paths[0] else: leading = delimiter if paths and paths[0].startswith(delimiter) else "" trailing = delimiter if paths and paths[-1].endswith(delimiter) else "" middle = delimiter.join([path.strip(delimiter) for path in paths if path.strip(delimiter)]) path = "".join([leading, middle, trailing]) return path def flatten_url_params( params: t.Union[ t.Dict[T, t.Union[T2, t.Iterable[T2]]], t.List[t.Tuple[T, t.Union[T2, t.Iterable[T2]]]], ], ) -> t.List[t.Tuple[T, T2]]: """ Flatten URL params into list of tuples. If any param value is a list or tuple, then map each value to the param key. >>> params = [("a", 1), ("a", [2, 3])] >>> assert flatten_url_params(params) == [("a", 1), ("a", 2), ("a", 3)] >>> params = {"a": [1, 2, 3]} >>> assert flatten_url_params(params) == [("a", 1), ("a", 2), ("a", 3)] """ if isinstance(params, dict): params = list(params.items()) flattened: t.List[t.Any] = [] for param, value in params: if isinstance(value, (list, tuple)): flattened += zip([param] * len(value), value) else: flattened.append((param, value)) return flattened pydash-8.0.3/src/pydash/types.py000066400000000000000000000012331464745015500166210ustar00rootroot00000000000000""" Common types. .. 
versionadded:: 7.0.0 """ from __future__ import annotations from decimal import Decimal import typing as t from typing_extensions import Protocol IterateeObjT = t.Union[int, str, t.List[t.Any], t.Tuple[t.Any, ...], t.Dict[t.Any, t.Any]] NumberT = t.Union[float, int, Decimal] NumberNoDecimalT = t.Union[float, int] PathT = t.Union[t.Hashable, t.List[t.Hashable]] _T_co = t.TypeVar("_T_co", covariant=True) _T_contra = t.TypeVar("_T_contra", contravariant=True) class SupportsMul(Protocol[_T_contra, _T_co]): def __mul__(self, x: _T_contra) -> _T_co: ... class SupportsRound(Protocol[_T_co]): def __round__(self) -> _T_co: ... pydash-8.0.3/src/pydash/utilities.py000066400000000000000000001127541464745015500175030ustar00rootroot00000000000000""" Utility functions. .. versionadded:: 1.0.0 """ from __future__ import annotations from collections import namedtuple from datetime import datetime, timezone from functools import partial, wraps import math from random import randint, uniform import re import time import typing as t from typing_extensions import Literal, ParamSpec, Protocol, Type import pydash as pyd from .helpers import NUMBER_TYPES, UNSET, base_get, callit, getargcount, iterator from .types import PathT __all__ = ( "attempt", "cond", "conforms", "conforms_to", "constant", "default_to", "default_to_any", "identity", "iteratee", "matches", "matches_property", "memoize", "method", "method_of", "noop", "nth_arg", "now", "over", "over_every", "over_some", "properties", "property_", "property_of", "random", "range_", "range_right", "result", "retry", "stub_list", "stub_dict", "stub_false", "stub_string", "stub_true", "times", "to_path", "unique_id", ) T = t.TypeVar("T") T2 = t.TypeVar("T2") CallableT = t.TypeVar("CallableT", bound=t.Callable[..., t.Any]) P = ParamSpec("P") # These regexes are used in to_path() to parse deep path strings. # This is used to split a deep path string into dict keys or list indexes. This matches "." as # delimiter (unless it is escaped by "//") and "[]" as delimiter while keeping the # "[]" as an item. RE_PATH_KEY_DELIM = re.compile(r"(?]". This is used to test whether a path string part is a # list index. RE_PATH_LIST_INDEX = re.compile(r"^\[-?\d+\]$") ID_COUNTER = 0 PathToken = namedtuple("PathToken", ["key", "default_factory"]) def attempt(func: t.Callable[P, T], *args: "P.args", **kwargs: "P.kwargs") -> t.Union[T, Exception]: """ Attempts to execute `func`, returning either the result or the caught error object. Args: func: The function to attempt. Returns: Returns the `func` result or error object. Example: >>> results = attempt(lambda x: x / 0, 1) >>> assert isinstance(results, ZeroDivisionError) .. versionadded:: 1.1.0 """ try: ret = func(*args, **kwargs) except Exception as ex: # allow different type reassignment ret = ex # type: ignore return ret @t.overload def cond( pairs: t.List[t.Tuple[t.Callable[P, t.Any], t.Callable[P, T]]], *extra_pairs: t.Tuple[t.Callable[P, t.Any], t.Callable[P, T]], ) -> t.Callable[P, T]: ... @t.overload def cond( pairs: t.List[t.List[t.Callable[P, t.Any]]], *extra_pairs: t.List[t.Callable[P, t.Any]] ) -> t.Callable[P, t.Any]: ... def cond(pairs, *extra_pairs): """ Creates a function that iterates over `pairs` and invokes the corresponding function of the first predicate to return truthy. Args: pairs: A list of predicate-function pairs. Returns: Returns the new composite function. 
Example: >>> func = cond([[matches({'a': 1}), constant('matches A')],\ [matches({'b': 2}), constant('matches B')],\ [stub_true, lambda value: value]]) >>> func({'a': 1, 'b': 2}) 'matches A' >>> func({'a': 0, 'b': 2}) 'matches B' >>> func({'a': 0, 'b': 0}) == {'a': 0, 'b': 0} True .. versionadded:: 4.0.0 .. versionchanged:: 4.2.0 Fixed missing argument passing to matched function and added support for passing in a single list of pairs instead of just pairs as separate arguments. """ if extra_pairs: pairs = [pairs] + list(extra_pairs) for pair in pairs: is_valid = False try: is_valid = len(pair) == 2 except Exception: pass if not is_valid: raise ValueError("Each predicate-function pair should contain " "exactly two elements") if not all(map(callable, pair)): raise TypeError("Both predicate-function pair should be callable") def _cond(*args): for pair in pairs: predicate, iteratee = pair if callit(predicate, *args): return iteratee(*args) return _cond @t.overload def conforms(source: t.Dict[T, t.Callable[[T2], t.Any]]) -> t.Callable[[t.Dict[T, T2]], bool]: ... @t.overload def conforms(source: t.List[t.Callable[[T], t.Any]]) -> t.Callable[[t.List[T]], bool]: ... def conforms(source: t.Union[t.List[t.Any], t.Dict[t.Any, t.Any]]) -> t.Callable[..., t.Any]: """ Creates a function that invokes the predicate properties of `source` with the corresponding property values of a given object, returning ``True`` if all predicates return truthy, else ``False``. Args: source: The object of property predicates to conform to. Returns: Returns the new spec function. Example: >>> func = conforms({"b": lambda n: n > 1}) >>> func({"b": 2}) True >>> func({"b": 0}) False >>> func = conforms([lambda n: n > 1, lambda n: n == 0]) >>> func([2, 0]) True >>> func([0, 0]) False .. versionadded:: 4.0.0 """ def _conforms(obj): for key, predicate in iterator(source): if not pyd.has(obj, key) or not predicate(obj[key]): return False return True return _conforms @t.overload def conforms_to(obj: t.Dict[T, T2], source: t.Dict[T, t.Callable[[T2], t.Any]]) -> bool: ... @t.overload def conforms_to(obj: t.List[T], source: t.List[t.Callable[[T], t.Any]]) -> bool: ... def conforms_to(obj, source): """ Checks if `obj` conforms to `source` by invoking the predicate properties of `source` with the corresponding property values of `obj`. Args: obj: The object to inspect. source: The object of property predicates to conform to. Example: >>> conforms_to({"b": 2}, {"b": lambda n: n > 1}) True >>> conforms_to({"b": 0}, {"b": lambda n: n > 1}) False >>> conforms_to([2, 0], [lambda n: n > 1, lambda n: n == 0]) True >>> conforms_to([0, 0], [lambda n: n > 1, lambda n: n == 0]) False .. versionadded:: 4.0.0 """ return conforms(source)(obj) def constant(value: T) -> t.Callable[..., T]: """ Creates a function that returns `value`. Args: value: Constant value to return. Returns: Function that always returns `value`. Example: >>> pi = constant(3.14) >>> pi() == 3.14 True .. versionadded:: 1.0.0 .. versionchanged:: 4.0.0 Returned function ignores arguments instead of raising exception. """ return partial(identity, value) def default_to(value: t.Union[T, None], default_value: T2) -> t.Union[T, T2]: """ Checks `value` to determine whether a default value should be returned in its place. The `default_value` is returned if value is None. Args: default_value: Default value passed in by the user. Returns: Returns `value` if :attr:`value` is given otherwise returns `default_value`. Example: >>> default_to(1, 10) 1 >>> default_to(None, 10) 10 .. 
versionadded:: 4.0.0 """ return default_to_any(value, default_value) @t.overload def default_to_any(value: None, *default_values: None) -> None: ... @t.overload def default_to_any( value: t.Union[T, None], default_value1: None, default_value2: T2, ) -> t.Union[T, T2]: ... @t.overload def default_to_any( value: t.Union[T, None], default_value1: None, default_value2: None, default_value3: T2, ) -> t.Union[T, T2]: ... @t.overload def default_to_any( value: t.Union[T, None], default_value1: None, default_value2: None, default_value3: None, default_value4: T2, ) -> t.Union[T, T2]: ... @t.overload def default_to_any( value: t.Union[T, None], default_value1: None, default_value2: None, default_value3: None, default_value4: None, default_value5: T2, ) -> t.Union[T, T2]: ... @t.overload def default_to_any(value: t.Union[T, None], *default_values: T2) -> t.Union[T, T2]: ... def default_to_any(value, *default_values): """ Checks `value` to determine whether a default value should be returned in its place. The first item that is not None of the `default_values` is returned. Args: value: Value passed in by the user. *default_values: Default values passed in by the user. Returns: Returns `value` if :attr:`value` is given otherwise returns the first not None value of `default_values`. Example: >>> default_to_any(1, 10, 20) 1 >>> default_to_any(None, 10, 20) 10 >>> default_to_any(None, None, 20) 20 .. versionadded:: 4.9.0 """ values = (value,) + default_values for val in values: if val is not None: return val @t.overload def identity(arg: T, *args: t.Any) -> T: ... @t.overload def identity(arg: None = None, *args: t.Any) -> None: ... def identity(arg=None, *args): """ Return the first argument provided to it. Args: *args: Arguments. Returns: First argument or ``None``. Example: >>> identity(1) 1 >>> identity(1, 2, 3) 1 >>> identity() is None True .. versionadded:: 1.0.0 """ return arg @t.overload def iteratee(func: t.Callable[P, T]) -> t.Callable[P, T]: ... @t.overload def iteratee(func: t.Any) -> t.Callable[..., t.Any]: ... def iteratee(func): """ Return a pydash style iteratee. If `func` is a property name the created iteratee will return the property value for a given element. If `func` is an object the created iteratee will return ``True`` for elements that contain the equivalent object properties, otherwise it will return ``False``. Args: func: Object to create iteratee function from. Returns: Iteratee function. Example: >>> get_data = iteratee("data") >>> get_data({"data": [1, 2, 3]}) [1, 2, 3] >>> is_active = iteratee({"active": True}) >>> is_active({"active": True}) True >>> is_active({"active": 0}) False >>> iteratee(["a", 5])({"a": 5}) True >>> iteratee(["a.b"])({"a.b": 5}) 5 >>> iteratee("a.b")({"a": {"b": 5}}) 5 >>> iteratee(("a", ["c", "d", "e"]))({"a": 1, "c": {"d": {"e": 3}}}) [1, 3] >>> iteratee(lambda a, b: a + b)(1, 2) 3 >>> ident = iteratee(None) >>> ident("a") 'a' >>> ident(1, 2, 3) 1 .. versionadded:: 1.0.0 .. versionchanged:: 2.0.0 Renamed ``create_iteratee()`` to :func:`iteratee`. .. versionchanged:: 3.0.0 Made pluck style iteratee support deep property access. .. versionchanged:: 3.1.0 - Added support for shallow pluck style property access via single item list/tuple. - Added support for matches property style iteratee via two item list/tuple. .. versionchanged:: 4.0.0 Removed alias ``callback``. .. versionchanged:: 4.1.0 Return :func:`properties` callback when `func` is a ``tuple``. 
""" if callable(func): cbk = func else: if isinstance(func, int): func = str(func) if isinstance(func, str): cbk = property_(func) elif isinstance(func, list) and len(func) == 1: cbk = property_(func) elif isinstance(func, list) and len(func) > 1: cbk = matches_property(*func[:2]) elif isinstance(func, tuple): cbk = properties(*func) elif isinstance(func, dict): cbk = matches(func) else: cbk = identity # Optimize iteratee by specifying the exact number of arguments the iteratee takes so that # arg inspection (costly process) can be skipped in helpers.callit(). cbk._argcount = 1 return cbk def matches(source: t.Any) -> t.Callable[[t.Any], bool]: """ Creates a matches-style predicate function which performs a deep comparison between a given object and the `source` object, returning ``True`` if the given object has equivalent property values, else ``False``. Args: source: Source object used for comparision. Returns: Function that compares an object to `source` and returns whether the two objects contain the same items. Example: >>> matches({"a": {"b": 2}})({"a": {"b": 2, "c": 3}}) True >>> matches({"a": 1})({"b": 2, "a": 1}) True >>> matches({"a": 1})({"b": 2, "a": 2}) False .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0 Use :func:`pydash.predicates.is_match` as matching function. """ return lambda obj: pyd.is_match(obj, source) def matches_property(key: t.Any, value: t.Any) -> t.Callable[[t.Any], bool]: """ Creates a function that compares the property value of `key` on a given object to `value`. Args: key: Object key to match against. value: Value to compare to. Returns: Function that compares `value` to an object's `key` and returns whether they are equal. Example: >>> matches_property("a", 1)({"a": 1, "b": 2}) True >>> matches_property(0, 1)([1, 2, 3]) True >>> matches_property("a", 2)({"a": 1, "b": 2}) False .. versionadded:: 3.1.0 """ prop_accessor = property_(key) return lambda obj: matches(value)(prop_accessor(obj)) class MemoizedFunc(Protocol[P, T, T2]): cache: t.Dict[T2, T] def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T: ... # pragma: no cover @t.overload def memoize(func: t.Callable[P, T], resolver: None = None) -> MemoizedFunc[P, T, str]: ... @t.overload def memoize( func: t.Callable[P, T], resolver: t.Union[t.Callable[P, T2], None] = None ) -> MemoizedFunc[P, T, T2]: ... def memoize(func, resolver=None): """ Creates a function that memoizes the result of `func`. If `resolver` is provided it will be used to determine the cache key for storing the result based on the arguments provided to the memoized function. By default, all arguments provided to the memoized function are used as the cache key. The result cache is exposed as the cache property on the memoized function. Args: func: Function to memoize. resolver: Function that returns the cache key to use. Returns: Memoized function. Example: >>> ident = memoize(identity) >>> ident(1) 1 >>> ident.cache["(1,){}"] == 1 True >>> ident(1, 2, 3) 1 >>> ident.cache["(1, 2, 3){}"] == 1 True .. versionadded:: 1.0.0 """ def memoized(*args: P.args, **kwargs: P.kwargs): if resolver: key = resolver(*args, **kwargs) else: key = f"{args}{kwargs}" if key not in memoized.cache: # type: ignore memoized.cache[key] = func(*args, **kwargs) # type:ignore return memoized.cache[key] # type: ignore memoized.cache = {} return memoized def method(path: PathT, *args: t.Any, **kwargs: t.Any) -> t.Callable[..., t.Any]: """ Creates a function that invokes the method at `path` on a given object. 
Any additional arguments are provided to the invoked method. Args: path: Object path of method to invoke. *args: Global arguments to apply to method when invoked. **kwargs: Global keyword argument to apply to method when invoked. Returns: Function that invokes method located at path for object. Example: >>> obj = {"a": {"b": [None, lambda x: x]}} >>> echo = method("a.b.1") >>> echo(obj, 1) == 1 True >>> echo(obj, "one") == "one" True .. versionadded:: 3.3.0 """ def _method(obj, *_args, **_kwargs): func = pyd.partial(pyd.get(obj, path), *args, **kwargs) return func(*_args, **_kwargs) return _method def method_of(obj: t.Any, *args: t.Any, **kwargs: t.Any) -> t.Callable[..., t.Any]: """ The opposite of :func:`method`. This method creates a function that invokes the method at a given path on object. Any additional arguments are provided to the invoked method. Args: obj: The object to query. *args: Global arguments to apply to method when invoked. **kwargs: Global keyword argument to apply to method when invoked. Returns: Function that invokes method located at path for object. Example: >>> obj = {"a": {"b": [None, lambda x: x]}} >>> dispatch = method_of(obj) >>> dispatch("a.b.1", 1) == 1 True >>> dispatch("a.b.1", "one") == "one" True .. versionadded:: 3.3.0 """ def _method_of(path, *_args, **_kwargs): func = pyd.partial(pyd.get(obj, path), *args, **kwargs) return func(*_args, **_kwargs) return _method_of def noop(*args: t.Any, **kwargs: t.Any) -> None: # pylint: disable=unused-argument """ A no-operation function. .. versionadded:: 1.0.0 """ pass def nth_arg(pos: int = 0) -> t.Callable[..., t.Any]: """ Creates a function that gets the argument at index n. If n is negative, the nth argument from the end is returned. Args: pos: The index of the argument to return. Returns: Returns the new pass-thru function. Example: >>> func = nth_arg(1) >>> func(11, 22, 33, 44) 22 >>> func = nth_arg(-1) >>> func(11, 22, 33, 44) 44 .. versionadded:: 4.0.0 """ def _nth_arg(*args): try: position = math.ceil(float(pos)) except ValueError: position = 0 return pyd.get(args, position) return _nth_arg def now() -> int: """ Return the number of milliseconds that have elapsed since the Unix epoch (1 January 1970 00:00:00 UTC). Returns: Milliseconds since Unix epoch. .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0 Use ``datetime`` module for calculating elapsed time. """ epoch = datetime.fromtimestamp(0, timezone.utc) delta = datetime.now(timezone.utc) - epoch return int(delta.total_seconds() * 1000) def over(funcs: t.Iterable[t.Callable[P, T]]) -> t.Callable[P, t.List[T]]: """ Creates a function that invokes all functions in `funcs` with the arguments it receives and returns their results. Args: funcs: List of functions to be invoked. Returns: Returns the new pass-thru function. Example: >>> func = over([max, min]) >>> func(1, 2, 3, 4) [4, 1] .. versionadded:: 4.0.0 """ def _over(*args: P.args, **kwargs: P.kwargs) -> t.List[T]: return [func(*args, **kwargs) for func in funcs] return _over def over_every(funcs: t.Iterable[t.Callable[P, t.Any]]) -> t.Callable[P, bool]: """ Creates a function that checks if all the functions in `funcs` return truthy when invoked with the arguments it receives. Args: funcs: List of functions to be invoked. Returns: Returns the new pass-thru function. Example: >>> func = over_every([bool, lambda x: x is not None]) >>> func(1) True .. 
versionadded:: 4.0.0 """ def _over_every(*args: P.args, **kwargs: P.kwargs) -> bool: return all(func(*args, **kwargs) for func in funcs) return _over_every def over_some(funcs: t.Iterable[t.Callable[P, t.Any]]) -> t.Callable[P, bool]: """ Creates a function that checks if any of the functions in `funcs` return truthy when invoked with the arguments it receives. Args: funcs: List of functions to be invoked. Returns: Returns the new pass-thru function. Example: >>> func = over_some([bool, lambda x: x is None]) >>> func(1) True .. versionadded:: 4.0.0 """ def _over_some(*args: P.args, **kwargs: P.kwargs) -> bool: return any(func(*args, **kwargs) for func in funcs) return _over_some def property_(path: PathT) -> t.Callable[[t.Any], t.Any]: """ Creates a function that returns the value at path of a given object. Args: path: Path value to fetch from object. Returns: Function that returns object's path value. Example: >>> get_data = property_("data") >>> get_data({"data": 1}) 1 >>> get_data({}) is None True >>> get_first = property_(0) >>> get_first([1, 2, 3]) 1 .. versionadded:: 1.0.0 .. versionchanged:: 4.0.1 Made property accessor work with deep path strings. """ return lambda obj: pyd.get(obj, path) def properties(*paths: t.Any) -> t.Callable[[t.Any], t.Any]: """ Like :func:`property_` except that it returns a list of values at each path in `paths`. Args: *path: Path values to fetch from object. Returns: Function that returns object's path value. Example: >>> getter = properties("a", "b", ["c", "d", "e"]) >>> getter({"a": 1, "b": 2, "c": {"d": {"e": 3}}}) [1, 2, 3] .. versionadded:: 4.1.0 """ return lambda obj: [getter(obj) for getter in (pyd.property_(path) for path in paths)] def property_of(obj: t.Any) -> t.Callable[[PathT], t.Any]: """ The inverse of :func:`property_`. This method creates a function that returns the key value of a given key on `obj`. Args: obj: Object to fetch values from. Returns: Function that returns object's key value. Example: >>> getter = property_of({"a": 1, "b": 2, "c": 3}) >>> getter("a") 1 >>> getter("b") 2 >>> getter("x") is None True .. versionadded:: 3.0.0 .. versionchanged:: 4.0.0 Removed alias ``prop_of``. """ return lambda key: pyd.get(obj, key) @t.overload def random(start: int = 0, stop: int = 1, *, floating: Literal[False] = False) -> int: ... @t.overload def random(start: float, stop: int = 1, floating: bool = False) -> float: ... @t.overload def random(start: int = 0, *, stop: float, floating: bool = False) -> float: ... @t.overload def random(start: float, stop: float, floating: bool = False) -> float: ... @t.overload def random( start: t.Union[float, int] = 0, stop: t.Union[float, int] = 1, *, floating: Literal[True] ) -> float: ... def random(start: t.Union[float, int] = 0, stop: t.Union[float, int] = 1, floating: bool = False): """ Produces a random number between `start` and `stop` (inclusive). If only one argument is provided a number between 0 and the given number will be returned. If floating is truthy or either `start` or `stop` are floats a floating-point number will be returned instead of an integer. Args: start: Minimum value. stop: Maximum value. floating: Whether to force random value to ``float``. Defaults to ``False``. Returns: Random value. Example: >>> 0 <= random() <= 1 True >>> 5 <= random(5, 10) <= 10 True >>> isinstance(random(floating=True), float) True .. 
versionadded:: 1.0.0 """ floating = isinstance(start, float) or isinstance(stop, float) or floating is True if stop < start: stop, start = start, stop if floating: rnd = uniform(start, stop) else: rnd = randint(start, stop) # type: ignore return rnd @t.overload def range_(stop: int) -> t.Generator[int, None, None]: ... @t.overload def range_(start: int, stop: int, step: int = 1) -> t.Generator[int, None, None]: ... def range_(*args): """ Creates a list of numbers (positive and/or negative) progressing from start up to but not including end. If `start` is less than `stop`, a zero-length range is created unless a negative `step` is specified. Args: start: Integer to start with. Defaults to ``0``. stop: Integer to stop at. step: The value to increment or decrement by. Defaults to ``1``. Yields: Next integer in range. Example: >>> list(range_(5)) [0, 1, 2, 3, 4] >>> list(range_(1, 4)) [1, 2, 3] >>> list(range_(0, 6, 2)) [0, 2, 4] >>> list(range_(4, 1)) [4, 3, 2] .. versionadded:: 1.0.0 .. versionchanged:: 1.1.0 Moved to :mod:`pydash.uilities`. .. versionchanged:: 3.0.0 Return generator instead of list. .. versionchanged:: 4.0.0 Support decrementing when start argument is greater than stop argument. """ return base_range(*args) @t.overload def range_right(stop: int) -> t.Generator[int, None, None]: ... @t.overload def range_right(start: int, stop: int, step: int = 1) -> t.Generator[int, None, None]: ... def range_right(*args): """ Similar to :func:`range_`, except that it populates the values in descending order. Args: start: Integer to start with. Defaults to ``0``. stop: Integer to stop at. step: The value to increment or decrement by. Defaults to ``1`` if `start` < `stop` else ``-1``. Yields: Next integer in range. Example: >>> list(range_right(5)) [4, 3, 2, 1, 0] >>> list(range_right(1, 4)) [3, 2, 1] >>> list(range_right(0, 6, 2)) [4, 2, 0] .. versionadded:: 4.0.0 """ return base_range(*args, from_right=True) # TODO @t.overload def result(obj: None, key: t.Any, default: None = None) -> None: ... @t.overload def result(obj: None, key: t.Any, default: T) -> T: ... @t.overload def result(obj: t.Any, key: t.Any, default: t.Any = None) -> t.Any: ... def result(obj, key, default=None): """ Return the value of property `key` on `obj`. If `key` value is a function it will be invoked and its result returned, else the property value is returned. If `obj` is falsey then `default` is returned. Args: obj: Object to retrieve result from. key: Key or index to get result from. default: Default value to return if `obj` is falsey. Defaults to ``None``. Returns: Result of ``obj[key]`` or ``None``. Example: >>> result({"a": 1, "b": lambda: 2}, "a") 1 >>> result({"a": 1, "b": lambda: 2}, "b") 2 >>> result({"a": 1, "b": lambda: 2}, "c") is None True >>> result({"a": 1, "b": lambda: 2}, "c", default=False) False .. versionadded:: 1.0.0 .. versionchanged:: 2.0.0 Added ``default`` argument. """ if not obj: return default ret = base_get(obj, key, default=default) if callable(ret): ret = ret() return ret def retry( attempts: int = 3, delay: t.Union[int, float] = 0.5, max_delay: t.Union[int, float] = 150.0, scale: t.Union[int, float] = 2.0, jitter: t.Union[int, float, t.Tuple[t.Union[int, float], t.Union[int, float]]] = 0, exceptions: t.Iterable[Type[Exception]] = (Exception,), on_exception: t.Union[t.Callable[[Exception, int], t.Any], None] = None, ) -> t.Callable[[CallableT], CallableT]: """ Decorator that retries a function multiple times if it raises an exception with an optional delay between each attempt. 
When a `delay` is supplied, there will be a sleep period in between retry attempts. The first delay time will always be equal to `delay`. After subsequent retries, the delay time will be scaled by `scale` up to `max_delay`. If `max_delay` is ``0``, then `delay` can increase unbounded. Args: attempts: Number of retry attempts. Defaults to ``3``. delay: Base amount of seconds to sleep between retry attempts. Defaults to ``0.5``. max_delay: Maximum number of seconds to sleep between retries. Is ignored when equal to ``0``. Defaults to ``150.0`` (2.5 minutes). scale: Scale factor to increase `delay` after first retry fails. Defaults to ``2.0``. jitter: Random jitter to add to `delay` time. Can be a positive number or 2-item tuple of numbers representing the random range to choose from. When a number is given, the random range will be from ``[0, jitter]``. When jitter is a float or contains a float, then a random float will be chosen; otherwise, a random integer will be selected. Defaults to ``0`` which disables jitter. exceptions: Tuple of exceptions that trigger a retry attempt. Exceptions not in the tuple will be ignored. Defaults to ``(Exception,)`` (all exceptions). on_exception: Function that is called when a retryable exception is caught. It is invoked with ``on_exception(exc, attempt)`` where ``exc`` is the caught exception and ``attempt`` is the attempt count. All arguments are optional. Defaults to ``None``. Example: >>> @retry(attempts=3, delay=0) ... def do_something(): ... print("something") ... raise Exception("something went wrong") >>> try: ... do_something() ... except Exception: ... print("caught something") something something something caught something ..versionadded:: 4.4.0 ..versionchanged:: 4.5.0 Added ``jitter`` argument. """ if not isinstance(attempts, int) or attempts <= 0: raise ValueError("attempts must be an integer greater than 0") if not isinstance(delay, NUMBER_TYPES) or delay < 0: raise ValueError("delay must be a number greater than or equal to 0") if not isinstance(max_delay, NUMBER_TYPES) or max_delay < 0: raise ValueError("scale must be a number greater than or equal to 0") if not isinstance(scale, NUMBER_TYPES) or scale <= 0: raise ValueError("scale must be a number greater than 0") if ( not isinstance(jitter, NUMBER_TYPES + (tuple,)) or (isinstance(jitter, NUMBER_TYPES) and jitter < 0) or ( isinstance(jitter, tuple) and (len(jitter) != 2 or not all(isinstance(jit, NUMBER_TYPES) for jit in jitter)) ) ): raise ValueError("jitter must be a number greater than 0 or a 2-item tuple of " "numbers") if not isinstance(exceptions, tuple) or not all( issubclass(exc, Exception) for exc in exceptions ): raise TypeError("exceptions must be a tuple of Exception types") if on_exception and not callable(on_exception): raise TypeError("on_exception must be a callable") if jitter and not isinstance(jitter, tuple): jitter = (0, jitter) on_exc_argcount = getargcount(on_exception, maxargs=2) if on_exception else None def decorator(func): @wraps(func) def decorated(*args, **kwargs): delay_time = delay for attempt in range(1, attempts + 1): # pylint: disable=catching-non-exception try: return func(*args, **kwargs) except exceptions as exc: if on_exception: callit(on_exception, exc, attempt, argcount=on_exc_argcount) if attempt == attempts: raise if jitter: delay_time += max(0, random(*jitter)) if delay_time < 0: # pragma: no cover continue if max_delay: delay_time = min(delay_time, max_delay) time.sleep(delay_time) # Scale after first iteration. 
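                    # e.g. with the defaults (delay=0.5, scale=2.0) the sleeps between
                    # attempts progress 0.5s, 1.0s, 2.0s, ..., capped at max_delay and
                    # nudged by any jitter added above.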
delay_time *= scale return decorated return decorator def stub_list() -> t.List[t.Any]: """ Returns empty "list". Returns: Empty list. Example: >>> stub_list() [] .. versionadded:: 4.0.0 """ return [] def stub_dict() -> t.Dict[t.Any, t.Any]: """ Returns empty "dict". Returns: Empty dict. Example: >>> stub_dict() {} .. versionadded:: 4.0.0 """ return {} def stub_false() -> Literal[False]: """ Returns ``False``. Returns: False Example: >>> stub_false() False .. versionadded:: 4.0.0 """ return False def stub_string() -> str: """ Returns an empty string. Returns: Empty string Example: >>> stub_string() '' .. versionadded:: 4.0.0 """ return "" def stub_true() -> Literal[True]: """ Returns ``True``. Returns: True Example: >>> stub_true() True .. versionadded:: 4.0.0 """ return True @t.overload def times(n: int, iteratee: t.Callable[..., T]) -> t.List[T]: ... @t.overload def times(n: int, iteratee: None = None) -> t.List[int]: ... def times(n: int, iteratee=None): """ Executes the iteratee `n` times, returning a list of the results of each iteratee execution. The iteratee is invoked with one argument: ``(index)``. Args: n: Number of times to execute `iteratee`. iteratee: Function to execute. Returns: A list of results from calling `iteratee`. Example: >>> times(5, lambda i: i) [0, 1, 2, 3, 4] .. versionadded:: 1.0.0 .. versionchanged:: 3.0.0 Reordered arguments to make `iteratee` first. .. versionchanged:: 4.0.0 - Re-reordered arguments to make `iteratee` last argument. - Added functionality for handling `iteratee` with zero positional arguments. """ if iteratee is None: iteratee = identity argcount = 1 else: argcount = getargcount(iteratee, maxargs=1) return [callit(iteratee, index, argcount=argcount) for index in range(n)] def to_path(value: PathT) -> t.List[t.Hashable]: """ Converts values to a property path array. Args: value: Value to convert. Returns: Returns the new property path array. Example: >>> to_path("a.b.c") ['a', 'b', 'c'] >>> to_path("a[0].b.c") ['a', 0, 'b', 'c'] >>> to_path("a[0][1][2].b.c") ['a', 0, 1, 2, 'b', 'c'] .. versionadded:: 4.0.0 .. versionchanged:: 4.2.1 Ensure returned path is always a list. """ path = [token.key for token in to_path_tokens(value)] return path def unique_id(prefix: t.Union[str, None] = None) -> str: """ Generates a unique ID. If `prefix` is provided the ID will be appended to it. Args: prefix: String prefix to prepend to ID value. Returns: ID value. Example: >>> unique_id() '1' >>> unique_id("id_") 'id_2' >>> unique_id() '3' .. versionadded:: 1.0.0 """ # pylint: disable=global-statement global ID_COUNTER # noqa: PLW0603 ID_COUNTER += 1 if prefix is None: prefix = "" else: prefix = pyd.to_string(prefix) return f"{prefix}{ID_COUNTER}" # # Helper functions not a part of main API # def _maybe_list_index(key): if isinstance(key, int): return key if pyd.is_string(key) and RE_PATH_LIST_INDEX.match(key): return int(key[1:-1]) return None def _to_path_token(key) -> PathToken: list_index = _maybe_list_index(key) if list_index is not None: return PathToken(list_index, default_factory=list) return PathToken( unescape_path_key(key) if pyd.is_string(key) else key, default_factory=dict, ) def to_path_tokens(value) -> t.List[PathToken]: """Parse `value` into :class:`PathToken` objects.""" if pyd.is_string(value) and ("." 
in value or "[" in value): # Since we can't tell whether a bare number is supposed to be dict key or a list index, we # support a special syntax where any string-integer surrounded by brackets is treated as a # list index and converted to an integer. keys = [_to_path_token(key) for key in filter(None, RE_PATH_KEY_DELIM.split(value))] elif pyd.is_string(value) or pyd.is_number(value): keys = [PathToken(value, default_factory=dict)] elif value is UNSET: keys = [] elif pyd.is_list(value): keys = [_to_path_token(key) for key in value] else: keys = [_to_path_token(value)] return keys def unescape_path_key(key): """Unescape path key.""" key = key.replace(r"\\", "\\") key = key.replace(r"\.", r".") return key def base_range(*args, **kwargs): """Yield range values.""" from_right = kwargs.get("from_right", False) if len(args) >= 3: args = args[:3] elif len(args) == 2: args = (args[0], args[1], None) elif len(args) == 1: args = (0, args[0], None) if args and args[2] is None: check_args = args[:2] else: check_args = args for arg in check_args: if not isinstance(arg, int): # pragma: no cover raise TypeError(f"range cannot interpret {type(arg).__name__!r} object as an integer") def gen(): if not args: return start, stop, step = args if step is None: step = 1 if start < stop else -1 length = int(max([math.ceil((stop - start) / (step or 1)), 0])) if from_right: start += (step * length) - step step *= -1 while length: yield start start += step length -= 1 return gen() pydash-8.0.3/tasks.py000066400000000000000000000135471464745015500145360ustar00rootroot00000000000000""" This module provides the CLI interface for invoke tasks. All tasks can be executed from this file's directory using: $ inv Where is a function defined below with the @task decorator. """ from functools import partial import os from pathlib import Path import sys import tempfile from invoke import Context, Exit, UnexpectedExit, run as _run, task PACKAGE_NAME = "pydash" PACKAGE_SOURCE = f"src/{PACKAGE_NAME}" MYPY_TESTS_DIR = "tests/pytest_mypy_testing" TEST_TARGETS = f"{PACKAGE_SOURCE} tests" LINT_TARGETS = f"{TEST_TARGETS} tasks.py" EXIT_EXCEPTIONS = (Exit, UnexpectedExit, SystemExit) # Set pyt=True to enable colored output when available. 
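# (pty=True makes Invoke run each command in a pseudo-terminal, so tools such as
# pytest and ruff see a TTY and keep their colored output.)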
run = partial(_run, pty=True) @task() def fmt(ctx: Context, target: str = "", quiet: bool = False) -> None: """Autoformat code and docstrings.""" if not quiet: print("Running ruff format") ruff_format(ctx, target, quiet=quiet) if not quiet: print("Running ruff lint fixes") ruff_fix(ctx, target, quiet=quiet) @task() def ruff_format(ctx: Context, target: str = "", quiet: bool = False) -> None: """Autoformat code and docstrings using ruff.""" run(f"ruff format {target}", hide=quiet) @task() def ruff_fix(ctx: Context, target: str = "", quiet: bool = False) -> None: """Autofix fixable lint issues using ruff.""" run(f"ruff check {target} --fix", hide=quiet) @task() def ruff_format_check(ctx: Context) -> None: """Check code for static errors using pylint.""" run("ruff format --check") @task() def ruff_check(ctx: Context) -> None: """Check code for static errors using pylint.""" run("ruff check") @task() def mypy(ctx: Context) -> None: """Check code using mypy type checker.""" run(f"mypy {LINT_TARGETS} --no-error-summary") @task() def lint(ctx: Context) -> None: """Run linters.""" linters = { "ruff-format-check": ruff_format_check, "ruff-check": ruff_check, "mypy": mypy, } # in python 3.8 and before the ast module doesn't have the `unparse` function # which is needed for the generation if sys.version_info >= (3, 9): linters["chaining-types-update-required"] = chaining_types_update_required failures = [] print(f"Preparing to run linters: {', '.join(linters)}\n") for name, linter in linters.items(): print(f"Running {name}") try: linter(ctx) except EXIT_EXCEPTIONS: failures.append(name) result = "FAILED" else: result = "PASSED" print(f"{result}\n") if failures: failed = ", ".join(failures) raise Exit(f"ERROR: linters failed: {failed}") @task(help={"args": "Override default pytest arguments"}) def test( ctx: Context, args: str = f"{TEST_TARGETS} --cov={PACKAGE_NAME}", with_mypy_tests: bool = False ) -> None: """Run unit tests using pytest.""" tox_env_site_packages_dir = os.getenv("TOX_ENV_SITE_PACKAGES_DIR") if tox_env_site_packages_dir: # Re-path package source to match tox env so that we generate proper coverage report. tox_env_pkg_src = os.path.join(tox_env_site_packages_dir, os.path.basename(PACKAGE_SOURCE)) args = args.replace(PACKAGE_SOURCE, tox_env_pkg_src) ignored_dirs = f"--ignore={MYPY_TESTS_DIR}" if with_mypy_tests is False else "" run(f"pytest {args} {ignored_dirs}") @task() def ci(ctx: Context) -> None: """Run linters and tests.""" print("Building package") build(ctx) print("Building docs") docs(ctx) print("Checking linters") lint(ctx) print("Running unit tests") test(ctx) @task() def docs(ctx: Context, serve: bool = False, bind: str = "127.0.0.1", port: int = 8000) -> None: """Build docs.""" run("rm -rf docs/_build") run("sphinx-build -q -W -b html docs docs/_build/html") if serve: print(f"Serving docs on {bind} port {port} (http://{bind}:{port}/) ...") run(f"python -m http.server -b {bind} --directory docs/_build/html {port}", hide=True) @task() def build(ctx: Context) -> None: """Build Python package.""" run("rm -rf dist build docs/_build") run("python -m build") @task() def clean(ctx: Context) -> None: """Remove temporary files related to development.""" run("find . 
-type f -name '*.py[cod]' -delete -o -type d -name __pycache__ -delete") run("rm -rf .tox .coverage .cache .pytest_cache **/.egg* **/*.egg* dist build .mypy_cache") @task(pre=[build]) def release(ctx: Context) -> None: """Release Python package.""" run("twine upload dist/*") @task() def generate_mypy_test(ctx: Context, file: str) -> None: """Generate base mypy test ready to be filled from doctests inside a python file.""" run( "python scripts/mypy_doctests_generator.py" f" {file} tests/pytest_mypy_testing/test_{Path(file).name}" ) @task() def generate_chaining_types( ctx: Context, output: str = "src/pydash/chaining/all_funcs.pyi" ) -> None: """Generates `all_funcs.pyi` stub file that types the chaining interface.""" run( "python scripts/chaining_type_generator.py" f" --class_name AllFuncs --output {output} --wrapper Chain" ) fmt(ctx, output, quiet=True) @task() def chaining_types_update_required(ctx: Context) -> None: with tempfile.NamedTemporaryFile(suffix=".pyi", dir=".") as tmp_file: generate_chaining_types(ctx, tmp_file.name) with open("src/pydash/chaining/all_funcs.pyi", "rb") as current_file: current = current_file.read() with open(tmp_file.name, "rb") as formatted_tmp_file: new = formatted_tmp_file.read() if current != new: err_msg = ( "ERROR: src/pydash/chaining/all_funcs.pyi is out of date. Please run " "`inv generate-chaining-types` and commit the changes." ) print(err_msg, file=sys.stderr) raise Exit() pydash-8.0.3/tests/000077500000000000000000000000001464745015500141675ustar00rootroot00000000000000pydash-8.0.3/tests/__init__.py000066400000000000000000000000001464745015500162660ustar00rootroot00000000000000pydash-8.0.3/tests/conftest.py000066400000000000000000000002171464745015500163660ustar00rootroot00000000000000from unittest import mock import pytest @pytest.fixture def mock_sleep(): with mock.patch("time.sleep") as mocked: yield mocked pydash-8.0.3/tests/helpers.py000066400000000000000000000035161464745015500162100ustar00rootroot00000000000000class Object(object): def __init__(self, **attrs): for key, value in attrs.items(): setattr(self, key, value) class ItemsObject(object): def __init__(self, items): self._items = items def items(self): if isinstance(self._items, dict): return list(self._items.items()) else: return enumerate(self._items) class IteritemsObject(object): def __init__(self, items): self._items = items def iteritems(self): if isinstance(self._items, dict): for key, value in self._items.items(): yield key, value else: for i, item in enumerate(self._items): yield i, item class Filter(object): def __init__(self, predicate): self.predicate = predicate def __call__(self, item): return self.predicate(item) def reduce_iteratee0(total, num): return total + num def reduce_iteratee1(result, num, key): result[key] = num * 3 return result def reduce_right_iteratee0(a, b): return a + b def noop(*args, **kwargs): pass def transform_iteratee0(result, num): num *= num if num % 2: result.append(num) return len(result) < 3 def is_equal_iteratee0(a, b): a_greet = a.startswith("h") if hasattr(a, "startswith") else False b_greet = b.startswith("h") if hasattr(b, "startswith") else False return a_greet == b_greet if a_greet or b_greet else None def for_in_iteratee0(value, key, obj): obj[key] += value def for_in_iteratee1(value, key, obj): obj[key] += value return False def for_in_iteratee2(value, index, obj): if index == 2: obj[index] = "index:2" return True elif index == 0: obj[index] = False return True else: obj[index] = True return False 
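

# Illustrative usage (assumed, not part of the test suite itself): these helpers are
# meant to be passed as iteratees/predicates to pydash functions, e.g.
#   pydash.reduce_([1, 2, 3], reduce_iteratee0)          # -> 6
#   pydash.filter_([1, 2, 3], Filter(lambda x: x > 1))   # -> [2, 3]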
pydash-8.0.3/tests/pytest_mypy_testing/000077500000000000000000000000001464745015500203325ustar00rootroot00000000000000pydash-8.0.3/tests/pytest_mypy_testing/__init__.py000066400000000000000000000000001464745015500224310ustar00rootroot00000000000000pydash-8.0.3/tests/pytest_mypy_testing/test_arrays.py000066400000000000000000000415701464745015500232530ustar00rootroot00000000000000import typing as t import pytest import pydash as _ @pytest.mark.mypy_testing def test_mypy_chunk() -> None: reveal_type(_.chunk([1, 2, 3, 4, 5], 2)) # R: builtins.list[typing.Sequence[builtins.int]] @pytest.mark.mypy_testing def test_mypy_compact() -> None: reveal_type(_.compact([True, False, None])) # R: builtins.list[builtins.bool] reveal_type(_.compact(['', 'hello', None])) # R: builtins.list[builtins.str] reveal_type(_.compact([0, 1, None])) # R: builtins.list[builtins.int] reveal_type(_.compact([0, 1])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_concat() -> None: reveal_type(_.concat([1, 2], [3, 4])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_difference() -> None: reveal_type(_.difference([1, 2, 3], [1], [2])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_difference_by() -> None: reveal_type(_.difference_by([1.2, 1.5, 1.7, 2.8], [0.9, 3.2], round)) # R: builtins.list[builtins.float] reveal_type(_.difference_by([{"hello": 1}], [{"hello": 2}], lambda d: d["hello"])) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_difference_with() -> None: array = ['apple', 'banana', 'pear'] others = (['avocado', 'pumpkin'], ['peach']) def comparator(a: str, b: str) -> bool: return a[0] == b[0] reveal_type(_.difference_with(array, *others, comparator=comparator)) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_drop() -> None: reveal_type(_.drop([1, 2, 3, 4], 2)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_drop_right() -> None: reveal_type(_.drop_right([1, 2, 3, 4], 2)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_drop_right_while() -> None: reveal_type(_.drop_right_while([1, 2, 3, 4], lambda x: x >= 3)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_drop_while() -> None: reveal_type(_.drop_while([1, 2, 3, 4], lambda x: x < 3)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_duplicates() -> None: reveal_type(_.duplicates([0, 1, 3, 2, 3, 1])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_fill() -> None: reveal_type(_.fill([1, 2, 3, 4, 5], 0)) # R: builtins.list[builtins.int] reveal_type(_.fill([1, 2, 3, 4, 5], 0, 1, 3)) # R: builtins.list[builtins.int] reveal_type(_.fill([1, 2, 3, 4, 5], 0, 0, 100)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_find_index() -> None: reveal_type(_.find_index([1, 2, 3, 4], lambda x: x >= 3)) # R: builtins.int reveal_type(_.find_index([1, 2, 3, 4], lambda x: x > 4)) # R: builtins.int reveal_type(_.find_index([{"a": 0, "b": 3}, {"a": "1", "c": 5}], {"a": 0})) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_find_last_index() -> None: reveal_type(_.find_last_index([1, 2, 3, 4], lambda x: x >= 3)) # R: builtins.int reveal_type(_.find_last_index([1, 2, 3, 4], lambda x: x > 4)) # R: builtins.int reveal_type(_.find_last_index([{"a": 0, "b": 3}, {"a": "1", "c": 5}], {"a": 0})) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_flatten() -> None: my_list: 
t.List[t.List[t.Union[int, t.List[int]]]] = [[1], [2, [3]], [[4]]] reveal_type(_.flatten(my_list)) # R: builtins.list[Union[builtins.int, builtins.list[builtins.int]]] @pytest.mark.mypy_testing def test_mypy_flatten_deep() -> None: reveal_type(_.flatten_deep([[1], [2, [3]], [[4]]])) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_flatten_depth() -> None: reveal_type(_.flatten_depth([[[1], [2, [3]], [[4]]]], 1)) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_from_pairs() -> None: my_list: t.List[t.List[t.Union[str, int]]] = [['a', 1], ['b', 2]] reveal_type(_.from_pairs(my_list)) # R: builtins.dict[Union[builtins.str, builtins.int], Union[builtins.str, builtins.int]] my_list2: t.List[t.Tuple[str, int]] = [('a', 1), ('b', 2)] reveal_type(_.from_pairs(my_list2)) # R: builtins.dict[builtins.str, builtins.int] # # @pytest.mark.mypy_testing def test_mypy_head() -> None: reveal_type(_.head([1, 2, 3, 4])) # R: Union[builtins.int, None] @pytest.mark.mypy_testing def test_mypy_index_of() -> None: reveal_type(_.index_of([1, 2, 3, 4], 2)) # R: builtins.int reveal_type(_.index_of([2, 1, 2, 3], 2, from_index=1)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_initial() -> None: reveal_type(_.initial([1, 2, 3, 4])) # R: typing.Sequence[builtins.int] @pytest.mark.mypy_testing def test_mypy_intercalate() -> None: my_list: t.List[t.List[int]] = [[2], [3]] reveal_type(_.intercalate(my_list, 'x')) # R: builtins.list[Union[builtins.int, builtins.str]] @pytest.mark.mypy_testing def test_mypy_interleave() -> None: reveal_type(_.interleave([1, 2, 3], [4, 5, 6], [7, 8, 9])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_intersection() -> None: reveal_type(_.intersection([1, 2, 3], [1, 2, 3, 4, 5], [2, 3])) # R: builtins.list[builtins.int] reveal_type(_.intersection([1, 2, 3])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_intersection_by() -> None: reveal_type(_.intersection_by([1.2, 1.5, 1.7, 2.8], [0.9, 3.2], round)) # R: builtins.list[builtins.float] reveal_type(_.intersection_by([{"hello": 1}], [{"hello": 2}], lambda d: d["hello"])) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_intersection_with() -> None: array = ['apple', 'banana', 'pear'] others = (['avocado', 'pumpkin'], ['peach']) def comparator(a: str, b:str) -> bool: return a[0] == b[0] reveal_type(_.intersection_with(array, *others, comparator=comparator)) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_intersperse() -> None: my_list: t.List[t.Union[int, t.List[int]]] = [1, [2], [3], 4] reveal_type(_.intersperse(my_list, 'x')) # R: builtins.list[Union[builtins.int, builtins.list[builtins.int], builtins.str]] @pytest.mark.mypy_testing def test_mypy_last() -> None: reveal_type(_.last([1, 2, 3, 4])) # R: Union[builtins.int, None] @pytest.mark.mypy_testing def test_mypy_last_index_of() -> None: reveal_type(_.last_index_of([1, 2, 2, 4], 2)) # R: builtins.int reveal_type(_.last_index_of([1, 2, 2, 4], 2, from_index=1)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_mapcat() -> None: def to_list(x: int) -> t.List[int]: return list(range(x)) reveal_type(_.mapcat(list(range(4)), to_list)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_nth() -> None: reveal_type(_.nth([1, 2, 3], 0)) # R: Union[builtins.int, None] reveal_type(_.nth([11, 22, 33])) # R: Union[builtins.int, None] @pytest.mark.mypy_testing def test_mypy_pop() -> None: array = [1, 2, 3, 4] 
reveal_type(_.pop(array)) # R: builtins.int reveal_type(_.pop(array, index=0)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_pull() -> None: reveal_type(_.pull([1, 2, 2, 3, 3, 4], 2, 3)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_pull_all() -> None: reveal_type(_.pull_all([1, 2, 2, 3, 3, 4], [2, 3])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_pull_all_by() -> None: array = [{'x': 1}, {'x': 2}, {'x': 3}, {'x': 1}] reveal_type(_.pull_all_by(array, [{'x': 1}, {'x': 3}], 'x')) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_pull_all_with() -> None: array = [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}, {'x': 5, 'y': 6}] reveal_type(_.pull_all_with(array, [{'x': 3, 'y': 4}], lambda a, b: a == b)) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] reveal_type(_.pull_all_with(array, [{'x': 3, 'y': 4}], lambda a, b: a != b)) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_pull_at() -> None: reveal_type(_.pull_at([1, 2, 3, 4], 0, 2)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_push() -> None: array = [1, 2, 3] reveal_type(_.push(array, [4], [6])) # R: builtins.list[Union[builtins.int, builtins.list[builtins.int]]] @pytest.mark.mypy_testing def test_mypy_remove() -> None: reveal_type(_.remove([1, 2, 3, 4], lambda x: x >= 3)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_reverse() -> None: reveal_type(_.reverse([1, 2, 3, 4])) # R: builtins.list[builtins.int] reveal_type(_.reverse("hello")) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_shift() -> None: array = [1, 2, 3, 4] reveal_type(_.shift(array)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_slice_() -> None: reveal_type(_.slice_([1, 2, 3, 4])) # R: builtins.list[builtins.int] reveal_type(_.slice_([1, 2, 3, 4], 1)) # R: builtins.list[builtins.int] reveal_type(_.slice_([1, 2, 3, 4], 1, 3)) # R: builtins.list[builtins.int] reveal_type(_.slice_("hello", 1, 3)) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_sort() -> None: reveal_type(_.sort([2, 1, 4, 3])) # R: builtins.list[builtins.int] reveal_type(_.sort([2, 1, 4, 3], reverse=True)) # R: builtins.list[builtins.int] value = _.sort( [{'a': 2, 'b': 1}, {'a': 3, 'b': 2}, {'a': 0, 'b': 3}], key=lambda item: item['a'] ) reveal_type(value) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_sorted_index() -> None: reveal_type(_.sorted_index([1, 2, 2, 3, 4], 2)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_sorted_index_by() -> None: array = [{'x': 4}, {'x': 5}] reveal_type(_.sorted_index_by(array, {'x': 4}, lambda o: o['x'])) # R: builtins.int reveal_type(_.sorted_index_by(array, {'x': 4}, 'x')) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_sorted_index_of() -> None: reveal_type(_.sorted_index_of([3, 5, 7, 10], 3)) # R: builtins.int reveal_type(_.sorted_index_of([10, 10, 5, 7, 3], 10)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_sorted_last_index() -> None: reveal_type(_.sorted_last_index([1, 2, 2, 3, 4], 2)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_sorted_last_index_by() -> None: array = [{'x': 4}, {'x': 5}] reveal_type(_.sorted_last_index_by(array, {'x': 4}, lambda o: o['x'])) # R: builtins.int reveal_type(_.sorted_last_index_by(array, {'x': 4}, 'x')) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_sorted_last_index_of() -> 
None: reveal_type(_.sorted_last_index_of([4, 5, 5, 5, 6], 5)) # R: builtins.int reveal_type(_.sorted_last_index_of([6, 5, 5, 5, 4], 6)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_sorted_uniq() -> None: reveal_type(_.sorted_uniq([4, 2, 2, 5])) # R: builtins.list[builtins.int] reveal_type(_.sorted_uniq([-2, -2, 4, 1])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_sorted_uniq_by() -> None: reveal_type(_.sorted_uniq_by([3, 2, 1, 3, 2, 1], lambda val: val % 2)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_splice() -> None: array = [1, 2, 3, 4] reveal_type(_.splice(array, 1)) # R: builtins.list[builtins.int] array = [1, 2, 3, 4] reveal_type(_.splice(array, 1, 2)) # R: builtins.list[builtins.int] reveal_type(_.splice(array, 1, 2, 0, 0)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_split_at() -> None: reveal_type(_.split_at([1, 2, 3, 4], 2)) # R: builtins.list[typing.Sequence[builtins.int]] @pytest.mark.mypy_testing def test_mypy_tail() -> None: reveal_type(_.tail([1, 2, 3, 4])) # R: typing.Sequence[builtins.int] @pytest.mark.mypy_testing def test_mypy_take() -> None: reveal_type(_.take([1, 2, 3, 4], 2)) # R: typing.Sequence[builtins.int] @pytest.mark.mypy_testing def test_mypy_take_right() -> None: reveal_type(_.take_right([1, 2, 3, 4], 2)) # R: typing.Sequence[builtins.int] @pytest.mark.mypy_testing def test_mypy_take_right_while() -> None: reveal_type(_.take_right_while([1, 2, 3, 4], lambda x: x >= 3)) # R: typing.Sequence[builtins.int] @pytest.mark.mypy_testing def test_mypy_take_while() -> None: reveal_type(_.take_while([1, 2, 3, 4], lambda x: x < 3)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_union() -> None: reveal_type(_.union([1, 2, 3], [2, 3, 4], [3, 4, 5])) # R: builtins.list[builtins.int] reveal_type(_.union([1, 2, 3], ["hello"])) # R: builtins.list[Union[builtins.int, builtins.str]] @pytest.mark.mypy_testing def test_mypy_union_by() -> None: reveal_type(_.union_by([1, 2, 3], [2, 3, 4], iteratee=lambda x: x % 2)) # R: builtins.list[builtins.int] reveal_type(_.union_by([{"hello": 1}], [{"hello": 2}], lambda d: d["hello"])) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_union_with() -> None: comparator = lambda a, b: (a % 2) == (b % 2) reveal_type(_.union_with([1, 2, 3], [2, 3, 4], comparator=comparator)) # R: builtins.list[builtins.int] reveal_type(_.union_with([1, 2, 3], [2, 3, 4])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_uniq() -> None: reveal_type(_.uniq([1, 2, 3, 1, 2, 3])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_uniq_by() -> None: reveal_type(_.uniq_by([1, 2, 3, 1, 2, 3], lambda val: val % 2)) # R: builtins.list[builtins.int] reveal_type(_.uniq_by([{"hello": 1}, {"hello": 1}], lambda val: val["hello"])) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_uniq_with() -> None: reveal_type(_.uniq_with([1, 2, 3, 4, 5], lambda a, b: (a % 2) == (b % 2))) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_unshift() -> None: array = [1, 2, 3, 4] reveal_type(_.unshift(array, -1, -2)) # R: builtins.list[builtins.int] reveal_type(_.unshift(array, "hello")) # R: builtins.list[Union[builtins.int, builtins.str]] @pytest.mark.mypy_testing def test_mypy_unzip() -> None: reveal_type(_.unzip([(1, 4, 7), (2, 5, 8), (3, 6, 9)])) # R: builtins.list[Tuple[builtins.int, builtins.int, 
builtins.int]] @pytest.mark.mypy_testing def test_mypy_unzip_with() -> None: def add(x: int, y: int) -> int: return x + y reveal_type(_.unzip_with([[1, 10, 100], [2, 20, 200]], add)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_without() -> None: reveal_type(_.without([1, 2, 3, 2, 4, 4], 2, 4)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_xor() -> None: reveal_type(_.xor([1, 3, 4], [1, 2, 4], [2])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_xor_by() -> None: reveal_type(_.xor_by([2.1, 1.2], [2.3, 3.4], round)) # R: builtins.list[builtins.float] reveal_type(_.xor_by([{'x': 1}], [{'x': 2}, {'x': 1}], iteratee='x')) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] reveal_type(_.xor_by([{"hello": 1}], [{"hello": 2}], lambda d: d["hello"])) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_xor_with() -> None: objects = [{'x': 1, 'y': 2}, {'x': 2, 'y': 1}] others = [{'x': 1, 'y': 1}, {'x': 1, 'y': 2}] reveal_type(_.xor_with(objects, others, lambda a, b: a == b)) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_zip_() -> None: reveal_type(_.zip_([1, 2, 3], [4, 5, 6], [7, 8, 9])) # R: builtins.list[Tuple[builtins.int, builtins.int, builtins.int]] reveal_type(_.zip_([1, 2, 3], ["one", "two", "three"])) # R: builtins.list[Tuple[builtins.int, builtins.str]] @pytest.mark.mypy_testing def test_mypy_zip_object() -> None: reveal_type(_.zip_object([1, 2, 3], [4, 5, 6])) # R: builtins.dict[builtins.int, builtins.int] reveal_type(_.zip_object([1, 2, 3], ["hello", "good", "friend"])) # R: builtins.dict[builtins.int, builtins.str] my_list: t.List[t.Tuple[int, str]] = [(1, "hello"), (2, "good"), (3, "friend")] reveal_type(_.zip_object(my_list)) # R: builtins.dict[builtins.int, builtins.str] @pytest.mark.mypy_testing def test_mypy_zip_object_deep() -> None: reveal_type(_.zip_object_deep(['a.b.c', 'a.b.d'], [1, 2])) # R: builtins.dict[Any, Any] @pytest.mark.mypy_testing def test_mypy_zip_with() -> None: def add(x: int, y: int) -> int: return x + y reveal_type(_.zip_with([1, 2], [10, 20], add)) # R: builtins.list[builtins.int] reveal_type(_.zip_with([1, 2], [10, 20], [100, 200], iteratee=add)) # R: builtins.list[builtins.int] def more_hello(s: str, n: int) -> str: return s * n reveal_type(_.zip_with(["hello", "hello", "hello"], [1, 2, 3], iteratee=more_hello)) # R: builtins.list[builtins.str] pydash-8.0.3/tests/pytest_mypy_testing/test_chaining.py000066400000000000000000000020611464745015500235220ustar00rootroot00000000000000import pytest import pydash as _ @pytest.mark.mypy_testing def test_mypy_chain() -> None: reveal_type(_.chain([1, 2, 3, 4]).map(lambda x: x * 2).sum().value()) # R: builtins.int summer = _.chain([1, 2, 3, 4]).sum() reveal_type(summer) # R: pydash.chaining.chaining.Chain[builtins.int] new_summer = summer.plant([1, 2]) reveal_type(new_summer) # R: pydash.chaining.chaining.Chain[builtins.int] reveal_type(new_summer.value()) # R: builtins.int reveal_type(summer.value()) # R: builtins.int def echo(item): print(item) summer = _.chain([1, 2, 3, 4]).for_each(echo).sum() committed = summer.commit() reveal_type(committed) # R: pydash.chaining.chaining.Chain[builtins.int] reveal_type(committed.value()) # R: builtins.int reveal_type(summer.value()) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_tap() -> None: data = [] def log(value): data.append(value) reveal_type(_.chain([1, 2, 3, 
4]).map(lambda x: x * 2).tap(log).value()) # R: builtins.list[builtins.int] pydash-8.0.3/tests/pytest_mypy_testing/test_collections.py000066400000000000000000000210251464745015500242610ustar00rootroot00000000000000import typing as t import pytest import pydash as _ @pytest.mark.mypy_testing def test_mypy_at() -> None: reveal_type(_.at([1, 2, 3, 4], 0, 2)) # R: builtins.list[Union[builtins.int, None]] reveal_type(_.at({"a": 1, "b": 2, "c": 3, "d": 4}, "a", "c")) # R: builtins.list[Union[builtins.int, None]] reveal_type(_.at({"a": 1, "b": 2, "c": {"d": {"e": 3}}}, "a", ["c", "d", "e"])) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_count_by() -> None: reveal_type(_.count_by([1, 2, 1, 2, 3, 4])) # R: builtins.dict[builtins.int, builtins.int] reveal_type(_.count_by(["a", "A", "B", "b"], lambda x: x.lower())) # R: builtins.dict[builtins.str, builtins.int] reveal_type(_.count_by({"a": 1, "b": 1, "c": 3, "d": 3})) # R: builtins.dict[builtins.int, builtins.int] reveal_type(_.count_by({"5": 5, "6": 6}, lambda x: int(x))) # R: builtins.dict[builtins.int, builtins.int] @pytest.mark.mypy_testing def test_mypy_every() -> None: reveal_type(_.every([1, True, "hello"])) # R: builtins.bool reveal_type(_.every([1, False, "hello"])) # R: builtins.bool reveal_type(_.every([{"a": 1}, {"a": True}, {"a": "hello"}], "a")) # R: builtins.bool reveal_type(_.every([{"a": 1}, {"a": False}, {"a": "hello"}], "a")) # R: builtins.bool reveal_type(_.every([{"a": 1}, {"a": 1}], {"a": 1})) # R: builtins.bool reveal_type(_.every([{"a": 1}, {"a": 2}], {"a": 1})) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_filter_() -> None: reveal_type(_.filter_([{"a": 1}, {"b": 2}, {"a": 1, "b": 3}], {"a": 1})) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] reveal_type(_.filter_([1, 2, 3, 4], lambda x: x >= 3)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_find() -> None: reveal_type(_.find([1, 2, 3, 4], lambda x: x >= 3)) # R: Union[builtins.int, None] reveal_type(_.find([{"a": 1}, {"b": 2}, {"a": 1, "b": 2}], {"a": 1})) # R: Union[builtins.dict[builtins.str, builtins.int], None] @pytest.mark.mypy_testing def test_mypy_find_last() -> None: reveal_type(_.find_last([1, 2, 3, 4], lambda x: x >= 3)) # R: Union[builtins.int, None] reveal_type(_.find_last([{"a": 1}, {"b": 2}, {"a": 1, "b": 2}], {"a": 1})) # R: Union[builtins.dict[builtins.str, builtins.int], None] @pytest.mark.mypy_testing def test_mypy_flat_map() -> None: def listify(n: int) -> t.List[t.List[int]]: return [[n, n]] reveal_type(_.flat_map([1, 2], listify)) # R: builtins.list[builtins.list[builtins.int]] @pytest.mark.mypy_testing def test_mypy_flat_map_deep() -> None: reveal_type(_.flat_map_deep([1, 2], lambda n: [[n, n]])) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_flat_map_depth() -> None: reveal_type(_.flat_map_depth([1, 2], lambda n: [[n, n]], 1)) # R: builtins.list[Any] reveal_type(_.flat_map_depth([1, 2], lambda n: [[n, n]], 2)) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_for_each() -> None: results = {} def cb(x): results[x] = x**2 reveal_type(_.for_each([1, 2, 3, 4], cb)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_for_each_right() -> None: results = {"total": 1} def cb(x): results["total"] = x * results["total"] reveal_type(_.for_each_right([1, 2, 3, 4], cb)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_group_by() -> None: reveal_type(_.group_by([{"a": 1, "b": 2}, {"a": 3, "b": 4}], "a")) # R: 
builtins.dict[Any, builtins.list[builtins.dict[builtins.str, builtins.int]]] reveal_type(_.group_by([{"a": 1, "b": 2}, {"a": 3, "b": 4}], lambda d: d == {"a": 1})) # R: builtins.dict[builtins.bool, builtins.list[builtins.dict[builtins.str, builtins.int]]] @pytest.mark.mypy_testing def test_mypy_includes() -> None: reveal_type(_.includes([1, 2, 3, 4], 2)) # R: builtins.bool reveal_type(_.includes([1, 2, 3, 4], 2, from_index=2)) # R: builtins.bool reveal_type(_.includes({"a": 1, "b": 2, "c": 3, "d": 4}, 2)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_invoke_map() -> None: reveal_type(_.invoke_map([{"a": [{"b": 1}]}, {"a": [{"c": 2}]}], "a[0].items")) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_key_by() -> None: reveal_type(_.key_by([{"a": 1, "b": 2}, {"a": 3, "b": 4}], "a")) # R: builtins.dict[Any, Any] reveal_type(_.key_by([{"a": 1, "b": 2}, {"a": 3, "b": 4}], lambda d: d["a"])) # R: builtins.dict[builtins.int, builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_map_() -> None: reveal_type(_.map_([1, 2, 3, 4], str)) # R: builtins.list[builtins.str] reveal_type(_.map_([{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}], "a")) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_nest() -> None: reveal_type(_.nest({"a": 1})) # R: Any reveal_type(_.nest([0, 1])) # R: Any reveal_type(_.nest({"a": 1}, "a")) # R: Any @pytest.mark.mypy_testing def test_mypy_order_by() -> None: reveal_type(_.order_by([{"a": 2, "b": 1}, {"a": 3, "b": 2}, {"a": 1, "b": 3}], ["b", "a"])) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] reveal_type(_.order_by([{"a": 2, "b": 1}, {"a": 3, "b": 2}, {"a": 1, "b": 3}], ["a", "b"], [False, True])) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_partition() -> None: reveal_type(_.partition([1, 2, 3, 4], lambda x: x >= 3)) # R: builtins.list[builtins.list[builtins.int]] @pytest.mark.mypy_testing def test_mypy_pluck() -> None: reveal_type(_.pluck([{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}], "a")) # R: builtins.list[Any] reveal_type(_.pluck([[[0, 1]], [[2, 3]], [[4, 5]]], "0.1")) # R: builtins.list[Any] reveal_type(_.pluck([{"a": {"b": [0, 1]}}, {"a": {"b": [2, 3]}}], ["a", "b", 1])) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_reduce_() -> None: reveal_type(_.reduce_([1, 2, 3, 4], lambda total, x: total * x)) # R: builtins.int reveal_type(_.reduce_(["a", "b", "c"], lambda x, y: x + 1 if y == "b" else x, 1)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_reduce_right() -> None: reveal_type(_.reduce_right([1, 2, 3, 4], lambda total, x: total * x)) # R: builtins.int reveal_type(_.reduce_right(["a", "b", "c"], lambda x, y: x + 1 if y == "b" else x, 1)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_reductions() -> None: reveal_type(_.reductions([1, 2, 3, 4], lambda total, x: total * x)) # R: builtins.list[builtins.int] reveal_type(_.reductions(["a", "b", "c"], lambda x, y: x + 1 if y == "b" else x, 1)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_reductions_right() -> None: reveal_type(_.reductions_right([1, 2, 3, 4], lambda total, x: total * x)) # R: builtins.list[builtins.int] reveal_type(_.reductions_right(["a", "b", "c"], lambda x, y: x + 1 if y == "b" else x, 1)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_reject() -> None: reveal_type(_.reject([1, 2, 3, 4], lambda x: x >= 3)) # R: builtins.list[builtins.int] 
reveal_type(_.reject([{"a": 0}, {"a": 1}, {"a": 2}], "a")) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] reveal_type(_.reject([{"a": 0}, {"a": 1}, {"a": 2}], {"a": 1})) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_sample() -> None: reveal_type(_.sample([1, 2, 3, 4, 5])) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_sample_size() -> None: reveal_type(_.sample_size([1, 2, 3, 4, 5], 2)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_shuffle() -> None: reveal_type(_.shuffle([1, 2, 3, 4])) # R: builtins.list[builtins.int] reveal_type(_.shuffle({1: "a", 2: "b"})) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_size() -> None: reveal_type(_.size([1, 2, 3, 4])) # R: builtins.int reveal_type(_.size({"a": 1, "b": 1})) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_some() -> None: reveal_type(_.some([False, True, 0])) # R: builtins.bool reveal_type(_.some([1, 2, 3, 4], lambda x: x >= 3)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_sort_by() -> None: reveal_type(_.sort_by({"a": 2, "b": 3, "c": 1})) # R: builtins.list[builtins.int] reveal_type(_.sort_by({"a": 2, "b": 3, "c": 1}, reverse=True)) # R: builtins.list[builtins.int] reveal_type(_.sort_by([{"a": 2}, {"a": 3}, {"a": 1}], "a")) # R: builtins.list[builtins.dict[builtins.str, builtins.int]] pydash-8.0.3/tests/pytest_mypy_testing/test_functions.py000066400000000000000000000232161464745015500237570ustar00rootroot00000000000000import typing as t import pytest import pydash as _ @pytest.mark.mypy_testing def test_mypy_after() -> None: def func(a: int, b: t.Dict[str, int], c: bytes) -> str: return f"{a} {b} {c!r}" after_func = _.after(func, 1) reveal_type(after_func) # R: pydash.functions.After[[a: builtins.int, b: builtins.dict[builtins.str, builtins.int], c: builtins.bytes], builtins.str] reveal_type(after_func(1, {}, b"")) # R: Union[builtins.str, None] @pytest.mark.mypy_testing def test_mypy_ary() -> None: def func(a: int, b: int, c: int = 0, d: int = 5) -> t.Tuple[int, int, int, int]: return (a, b, c, d) ary_func = _.ary(func, 2) reveal_type(ary_func(1, 2, 3, 4, 5, 6)) # R: Tuple[builtins.int, builtins.int, builtins.int, builtins.int] reveal_type(ary_func(1, 2, 3, 4, 5, 6, c=10, d=20)) # R: Tuple[builtins.int, builtins.int, builtins.int, builtins.int] @pytest.mark.mypy_testing def test_mypy_before() -> None: def func(a: int, b: int, c: int) -> t.Tuple[int, int, int]: return (a, b, c) before_func = _.before(func, 3) reveal_type(before_func(1, 2, 3)) # R: Union[Tuple[builtins.int, builtins.int, builtins.int], None] @pytest.mark.mypy_testing def test_mypy_conjoin() -> None: def up_3(x: int) -> bool: return x > 3 def is_int(x: t.Any) -> int: return isinstance(x, int) conjoiner = _.conjoin(is_int, up_3) reveal_type(conjoiner([1, 2, 3])) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_curry() -> None: def func(a: int, b: str, c: bytes) -> t.Tuple[int, str, bytes]: return (a, b, c) currier = _.curry(func) reveal_type(currier) # R: pydash.functions.CurryThree[builtins.int, builtins.str, builtins.bytes, Tuple[builtins.int, builtins.str, builtins.bytes]] currier1 = currier(1) reveal_type(currier1) # R: pydash.functions.CurryTwo[builtins.str, builtins.bytes, Tuple[builtins.int, builtins.str, builtins.bytes]] currier2 = currier1("hi") reveal_type(currier2) # R: pydash.functions.CurryOne[builtins.bytes, Tuple[builtins.int, builtins.str, builtins.bytes]] currier3 = currier2(b"hi again") 
reveal_type(currier3) # R: Tuple[builtins.int, builtins.str, builtins.bytes] @pytest.mark.mypy_testing def test_mypy_curry_right() -> None: def func(a: int, b: str, c: bytes) -> t.Tuple[int, str, bytes]: return (a, b, c) currier = _.curry_right(func) reveal_type(currier) # R: pydash.functions.CurryRightThree[builtins.bytes, builtins.str, builtins.int, Tuple[builtins.int, builtins.str, builtins.bytes]] currier1 = currier(b"hi again") reveal_type(currier1) # R: pydash.functions.CurryRightTwo[builtins.str, builtins.int, Tuple[builtins.int, builtins.str, builtins.bytes]] currier2 = currier1("hi") reveal_type(currier2) # R: pydash.functions.CurryRightOne[builtins.int, Tuple[builtins.int, builtins.str, builtins.bytes]] currier3 = currier2(1) reveal_type(currier3) # R: Tuple[builtins.int, builtins.str, builtins.bytes] @pytest.mark.mypy_testing def test_mypy_debounce() -> None: def func(a: int, b: str) -> t.Tuple[int, str]: return (a, b) debounced = _.debounce(func, 5000) reveal_type(debounced) # R: pydash.functions.Debounce[[a: builtins.int, b: builtins.str], Tuple[builtins.int, builtins.str]] reveal_type(debounced(5, "hi")) # R: Tuple[builtins.int, builtins.str] @pytest.mark.mypy_testing def test_mypy_delay() -> None: def func(a: int, b: str) -> t.Tuple[int, str]: return (a, b) reveal_type(_.delay(func, 0, 5, "hi")) # R: Tuple[builtins.int, builtins.str] @pytest.mark.mypy_testing def test_mypy_disjoin() -> None: def is_float(x: t.Any) -> bool: return isinstance(x, float) def is_int(x: t.Any) -> bool: return isinstance(x, int) disjoiner = _.disjoin(is_float, is_int) reveal_type(disjoiner) # R: pydash.functions.Disjoin[Any] reveal_type(disjoiner([1, '2', '3'])) # R: builtins.bool reveal_type(disjoiner([1.0, '2', '3'])) # R: builtins.bool reveal_type(disjoiner(['1', '2', '3'])) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_flip() -> None: def func(a: int, b: str, c: bytes) -> t.Tuple[int, str, bytes]: return (a, b, c) reveal_type(_.flip(func)) # R: def (builtins.bytes, builtins.str, builtins.int) -> Tuple[builtins.int, builtins.str, builtins.bytes] @pytest.mark.mypy_testing def test_mypy_flow() -> None: def mult_5(x: int) -> int: return x * 5 def div_10(x: int) -> float: return x / 10.0 def pow_2(x: float) -> float: return x ** 2 def sum_list(x: t.List[int]) -> int: return sum(x) ops = _.flow(sum_list, mult_5, div_10, pow_2) reveal_type(ops) # R: pydash.functions.Flow[[x: builtins.list[builtins.int]], builtins.float] reveal_type(ops([1, 2, 3, 4])) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_flow_right() -> None: def mult_5(x: float) -> float: return x * 5 def div_10(x: int) -> float: return x / 10 def pow_2(x: int) -> int: return x ** 2 def sum_list(x: t.List[int]) -> int: return sum(x) ops = _.flow_right(mult_5, div_10, pow_2, sum_list) reveal_type(ops) # R: pydash.functions.Flow[[x: builtins.list[builtins.int]], builtins.float] reveal_type(ops([1, 2, 3, 4])) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_iterated() -> None: def double(x: int) -> int: return x * 2 doubler = _.iterated(double) reveal_type(doubler) # R: pydash.functions.Iterated[builtins.int] reveal_type(doubler(4, 5)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_juxtapose() -> None: def double(x: int) -> int: return x * 2 def triple(x: int) -> int: return x * 3 def quadruple(x: int) -> int: return x * 4 f = _.juxtapose(double, triple, quadruple) reveal_type(f) # R: pydash.functions.Juxtapose[[x: builtins.int], builtins.int] reveal_type(f(5)) # R: builtins.list[builtins.int] 
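# Runtime sketch (illustrative; this file only checks static types, it does not
# assert runtime values): the juxtaposed callable applies every wrapped function
# to the same arguments and collects the results, e.g.
#   _.juxtapose(double, triple, quadruple)(5)  # -> [10, 15, 20]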
@pytest.mark.mypy_testing def test_mypy_negate() -> None: def is_number(x: t.Any) -> bool: return isinstance(x, (int, float)) not_is_number = _.negate(is_number) reveal_type(not_is_number) # R: pydash.functions.Negate[[x: Any]] reveal_type(not_is_number('1')) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_once() -> None: def first_arg(*args: int) -> int: return args[0] oncer = _.once(first_arg) reveal_type(oncer) # R: pydash.functions.Once[[*args: builtins.int], builtins.int] reveal_type(oncer(6)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_over_args() -> None: def squared(x: int) -> int: return x ** 2 def double(x: int) -> int: return x * 2 def in_list(x: int, y: int) -> t.List[int]: return [x, y] modder = _.over_args(in_list, squared, double) reveal_type(modder) # R: def (builtins.int, builtins.int) -> builtins.list[builtins.int] reveal_type(modder(5, 10)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_partial() -> None: def cut(array: t.List[int], n: int) -> t.List[int]: return array[n:] dropper = _.partial(cut, [1, 2, 3, 4]) reveal_type(dropper) # R: pydash.functions.Partial[builtins.list[builtins.int]] reveal_type(dropper(2)) # R: builtins.list[builtins.int] myrest = _.partial(cut, n=1) reveal_type(myrest) # R: pydash.functions.Partial[builtins.list[builtins.int]] reveal_type(myrest([1, 2, 3, 4])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_partial_right() -> None: def cut(array: t.List[int], n: int) -> t.List[int]: return array[n:] myrest = _.partial_right(cut, 1) reveal_type(myrest) # R: pydash.functions.Partial[builtins.list[builtins.int]] reveal_type(myrest([1, 2, 3, 4])) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_rearg() -> None: def func(x: int, y: int) -> t.List[int]: return [x, y] jumble = _.rearg(func, 1, 2, 3) reveal_type(jumble) # R: pydash.functions.Rearg[[x: builtins.int, y: builtins.int], builtins.list[builtins.int]] reveal_type(jumble(1, 2)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_spread() -> None: def greet_people(*people: str) -> str: return 'Hello ' + ', '.join(people) + '!' 
greet = _.spread(greet_people) reveal_type(greet) # R: pydash.functions.Spread[builtins.str] reveal_type(greet(['Mike', 'Don', 'Leo'])) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_throttle() -> None: def func(x: int) -> int: return x throttled = _.throttle(func, 0) reveal_type(throttled) # R: pydash.functions.Throttle[[x: builtins.int], builtins.int] reveal_type(throttled(5)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_unary() -> None: def func(a: int, b: int = 1, c: int = 0, d: int = 5) -> t.Tuple[int, int, int, int]: return (a, b, c, d) unary_func = _.unary(func) reveal_type(unary_func) # R: pydash.functions.Ary[Tuple[builtins.int, builtins.int, builtins.int, builtins.int]] reveal_type(unary_func(1, 2, 3, 4, 5, 6)) # R: Tuple[builtins.int, builtins.int, builtins.int, builtins.int] reveal_type(unary_func(1, 2, 3, 4, 5, 6, c=10, d=20)) # R: Tuple[builtins.int, builtins.int, builtins.int, builtins.int] @pytest.mark.mypy_testing def test_mypy_wrap() -> None: def as_tuple(x: str, y: int) -> t.Tuple[str, int]: return (x, y) wrapper = _.wrap('hello', as_tuple) reveal_type(wrapper) # R: pydash.functions.Partial[Tuple[builtins.str, builtins.int]] reveal_type(wrapper(1)) # R: Tuple[builtins.str, builtins.int] pydash-8.0.3/tests/pytest_mypy_testing/test_numerical.py000066400000000000000000000127161464745015500237310ustar00rootroot00000000000000import math import typing as t import pytest import pydash as _ @pytest.mark.mypy_testing def test_mypy_add() -> None: reveal_type(_.add(10, 5)) # R: builtins.int reveal_type(_.add(10.1, 5)) # R: builtins.float reveal_type(_.add(10, 5.5)) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_sum_() -> None: reveal_type(_.sum_([1, 2, 3, 4])) # R: builtins.int reveal_type(_.sum_([1.5, 2, 3, 4])) # R: builtins.float reveal_type(_.sum_({"hello": 1, "bye": 2})) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_sum_by() -> None: reveal_type(_.sum_by([1, 2, 3, 4], lambda x: x ** 2)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_mean() -> None: reveal_type(_.mean([1, 2, 3, 4])) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_mean_by() -> None: reveal_type(_.mean_by([1, 2, 3, 4], lambda x: x ** 2)) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_ceil() -> None: reveal_type(_.ceil(3.275)) # R: builtins.float reveal_type(_.ceil(3.215, 1)) # R: builtins.float reveal_type(_.ceil(6.004, 2)) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_clamp() -> None: reveal_type(_.clamp(-10, -5, 5)) # R: builtins.int reveal_type(_.clamp(10, -5, 5)) # R: builtins.int reveal_type(_.clamp(10, 5)) # R: builtins.int reveal_type(_.clamp(-10, 5)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_divide() -> None: reveal_type(_.divide(20, 5)) # R: builtins.float reveal_type(_.divide(1.5, 3)) # R: builtins.float reveal_type(_.divide(None, None)) # R: builtins.float reveal_type(_.divide(5, None)) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_floor() -> None: reveal_type(_.floor(3.75)) # R: builtins.float reveal_type(_.floor(3.215, 1)) # R: builtins.float reveal_type(_.floor(0.046, 2)) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_max_() -> None: reveal_type(_.max_([1, 2, 3, 4])) # R: builtins.int empty_int_list: t.List[int] = [] reveal_type(_.max_(empty_int_list, default=-1)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_max_by() -> None: def floor(x: float) -> int: return math.floor(x) reveal_type(_.max_by([1.0, 1.5, 1.8], floor)) # R: builtins.float 
reveal_type(_.max_by([{'a': 1}, {'a': 2}, {'a': 3}], 'a')) # R: builtins.dict[builtins.str, builtins.int] empty_int_list: t.List[int] = [] reveal_type(_.max_by(empty_int_list, default=-1)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_median() -> None: reveal_type(_.median([1, 2, 3, 4, 5])) # R: Union[builtins.float, builtins.int] reveal_type(_.median([1, 2, 3, 4])) # R: Union[builtins.float, builtins.int] @pytest.mark.mypy_testing def test_mypy_min_() -> None: reveal_type(_.min_([1, 2, 3, 4])) # R: builtins.int empty_int_list: t.List[int] = [] reveal_type(_.min_(empty_int_list, default=100)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_min_by() -> None: def floor(x: float) -> int: return math.floor(x) reveal_type(_.min_by([1.8, 1.5, 1.0], floor)) # R: builtins.float reveal_type(_.min_by([{'a': 1}, {'a': 2}, {'a': 3}], 'a')) # R: builtins.dict[builtins.str, builtins.int] empty_int_list: t.List[int] = [] reveal_type(_.min_by(empty_int_list, default=100)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_moving_mean() -> None: reveal_type(_.moving_mean(range(10), 1)) # R: builtins.list[builtins.float] @pytest.mark.mypy_testing def test_mypy_multiply() -> None: reveal_type(_.multiply(4, 5)) # R: builtins.int reveal_type(_.multiply(10.5, 4)) # R: builtins.float reveal_type(_.multiply(10, 4.5)) # R: builtins.float reveal_type(_.multiply(None, 10)) # R: builtins.int reveal_type(_.multiply(10, None)) # R: builtins.int reveal_type(_.multiply(None, 5.5)) # R: builtins.float reveal_type(_.multiply(5.5, None)) # R: builtins.float reveal_type(_.multiply(None, None)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_power() -> None: reveal_type(_.power(5, 2)) # R: Union[builtins.int, builtins.float] reveal_type(_.power(12.5, 3)) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_round_() -> None: reveal_type(_.round_(3.275)) # R: builtins.float reveal_type(_.round_(3.275, 1)) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_scale() -> None: reveal_type(_.scale([1, 2, 3, 4])) # R: builtins.list[builtins.float] reveal_type(_.scale([1, 2, 3, 4], 1)) # R: builtins.list[builtins.float] @pytest.mark.mypy_testing def test_mypy_slope() -> None: reveal_type(_.slope((1, 2), (4, 8))) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_std_deviation() -> None: reveal_type(_.std_deviation([1, 18, 20, 4])) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_subtract() -> None: reveal_type(_.subtract(10, 5)) # R: builtins.int reveal_type(_.subtract(-10, 4)) # R: builtins.int reveal_type(_.subtract(2, 0.5)) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_transpose() -> None: reveal_type(_.transpose([[1, 2, 3], [4, 5, 6], [7, 8, 9]])) # R: builtins.list[builtins.list[builtins.int]] @pytest.mark.mypy_testing def test_mypy_variance() -> None: reveal_type(_.variance([1, 18, 20, 4])) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_var() -> None: reveal_type(_.zscore([1, 2, 3])) # R: builtins.list[builtins.float] pydash-8.0.3/tests/pytest_mypy_testing/test_objects.py000066400000000000000000000337111464745015500234010ustar00rootroot00000000000000import typing as t import pytest import pydash as _ class MyClass: def __init__(self) -> None: self.x = 5 self.lst: t.List[int] = [] def get_x(self) -> int: return self.x @pytest.mark.mypy_testing def test_mypy_assign() -> None: obj = {b"d": 5.5} reveal_type(_.assign(obj, {"a": 1}, {"b": 2}, {"c": 3})) # R: builtins.dict[Union[builtins.bytes, builtins.str], Union[builtins.float, 
builtins.int]] @pytest.mark.mypy_testing def test_mypy_assign_with() -> None: def customizer(x: t.Union[int, None], y: int) -> float: return float(y) if x is None else float(x + y) reveal_type(_.assign_with({"a": 1}, {"b": 2}, {"a": 3}, customizer=customizer)) # R: builtins.dict[builtins.str, Union[builtins.int, builtins.float]] @pytest.mark.mypy_testing def test_mypy_callables() -> None: reveal_type(_.callables({"a": 1, "b": lambda: 2, "c": lambda: 3})) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_clone() -> None: reveal_type(_.clone({"hello": 5})) # R: builtins.dict[builtins.str, builtins.int] @pytest.mark.mypy_testing def test_mypy_clone_with() -> None: x: t.Dict[str, t.Union[int, t.Dict[str, int]]] = {"a": 1, "b": 2, "c": {"d": 3}} def cbk(v: t.Union[int, t.Dict[str, int]], k: t.Union[str, None]) -> t.Union[int, None]: return v + 2 if isinstance(v, int) and k else None reveal_type(_.clone_with(x, cbk)) # R: builtins.dict[builtins.str, Union[builtins.int, builtins.dict[builtins.str, builtins.int], None]] @pytest.mark.mypy_testing def test_mypy_clone_deep() -> None: x: t.Dict[str, t.Union[int, t.Dict[str, int]]] = {"a": 1, "b": 2, "c": {"d": 3}} reveal_type(_.clone_deep(x)) # R: builtins.dict[builtins.str, Union[builtins.int, builtins.dict[builtins.str, builtins.int]]] @pytest.mark.mypy_testing def test_mypy_clone_deep_with() -> None: x: t.Dict[str, t.Union[int, t.Dict[str, int]]] = {"a": 1, "b": 2, "c": {"d": 3}} def cbk(v: t.Union[int, t.Dict[str, int]], k: t.Union[str, None]) -> t.Union[int, None]: return v + 2 if isinstance(v, int) and k else None reveal_type(_.clone_deep_with(x, cbk)) # R: builtins.dict[builtins.str, Union[builtins.int, builtins.dict[builtins.str, builtins.int], None]] @pytest.mark.mypy_testing def test_mypy_defaults() -> None: obj = {"a": 1} reveal_type(_.defaults(obj, {"b": 2}, {"c": 3}, {"a": 4})) # R: builtins.dict[builtins.str, builtins.int] @pytest.mark.mypy_testing def test_mypy_defaults_deep() -> None: obj = {"a": {"b": 1}} reveal_type(_.defaults_deep(obj, {"a": {"b": 2, "c": 3}})) # R: builtins.dict[builtins.str, builtins.dict[builtins.str, builtins.int]] @pytest.mark.mypy_testing def test_mypy_find_key() -> None: def is_one(x: int) -> bool: return x == 1 reveal_type(_.find_key({"a": 1, "b": 2, "c": 3}, is_one)) # R: Union[builtins.str, None] reveal_type(_.find_key([1, 2, 3, 4], is_one)) # R: Union[builtins.int, None] @pytest.mark.mypy_testing def test_mypy_find_last_key() -> None: def is_one(x: int) -> bool: return x == 1 reveal_type(_.find_last_key({"a": 1, "b": 2, "c": 3}, is_one)) # R: Union[builtins.str, None] reveal_type(_.find_last_key([1, 2, 3, 1], is_one)) # R: Union[builtins.int, None] @pytest.mark.mypy_testing def test_mypy_for_in() -> None: def cb(v: int, k: str) -> None: return None reveal_type(_.for_in({"a": 1, "b": 2, "c": 3}, cb)) # R: builtins.dict[builtins.str, builtins.int] @pytest.mark.mypy_testing def test_mypy_for_in_right() -> None: def cb(v: int) -> None: return None reveal_type(_.for_in_right([1, 2, 3, 4], cb)) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_get() -> None: reveal_type(_.get({}, "a.b.c")) # R: Any reveal_type(_.get({"a": {"b": {"c": [1, 2, 3, 4]}}}, "a.b.c[1]")) # R: Any reveal_type(_.get({"a": {"b": [0, {"c": [1, 2]}]}}, "a.b.1.c.2")) # R: Any reveal_type(_.get(["a", "b"], 0)) # R: Union[builtins.str, None] reveal_type(_.get(["a", "b"], 0, "c")) # R: builtins.str reveal_type(_.get(MyClass(), "x")) # R: Any @pytest.mark.mypy_testing def test_mypy_has() -> None: 
reveal_type(_.has([1, 2, 3], 1)) # R: builtins.bool reveal_type(_.has({"a": 1, "b": 2}, "b")) # R: builtins.bool reveal_type(_.has(MyClass(), "get_x")) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_invert() -> None: reveal_type(_.invert({"a": 1, "b": 2, "c": 3})) # R: builtins.dict[builtins.int, builtins.str] @pytest.mark.mypy_testing def test_mypy_invert_by() -> None: obj = {"a": 1, "b": 2, "c": 1} reveal_type(_.invert_by(obj)) # R: builtins.dict[builtins.int, builtins.list[builtins.str]] def group_prefix(x: int) -> str: return "group" + str(x) reveal_type(_.invert_by(obj, group_prefix)) # R: builtins.dict[builtins.str, builtins.list[builtins.str]] @pytest.mark.mypy_testing def test_mypy_invoke() -> None: obj = {"a": [{"b": {"c": [1, 2, 3, 4]}}]} reveal_type(_.invoke(obj, "a[0].b.c.pop", 1)) # R: Any reveal_type(_.invoke(MyClass(), "get_x")) # R: Any @pytest.mark.mypy_testing def test_mypy_keys() -> None: reveal_type(_.keys([1, 2, 3])) # R: builtins.list[builtins.int] reveal_type(_.keys({"a": 1, "b": 2})) # R: builtins.list[builtins.str] reveal_type(_.keys(MyClass())) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_map_keys() -> None: def callback(value: int, key: str) -> str: return key * 2 reveal_type(_.map_keys({"a": 1, "b": 2, "c": 3}, callback)) # R: builtins.dict[builtins.str, builtins.int] @pytest.mark.mypy_testing def test_mypy_map_values() -> None: def times_two(x: int) -> str: return str(x * 2) reveal_type(_.map_values({"a": 1, "b": 2, "c": 3}, times_two)) # R: builtins.dict[builtins.str, builtins.str] reveal_type(_.map_values({"a": 1, "b": {"d": 4}, "c": 3}, {"d": 4})) # R: builtins.dict[Any, Any] @pytest.mark.mypy_testing def test_mypy_map_values_deep() -> None: x = {"a": 1, "b": {"c": 2}} def times_two(x: int) -> int: return x * 2 reveal_type(_.map_values_deep(x, times_two)) # R: Any @pytest.mark.mypy_testing def test_mypy_merge() -> None: obj = {"a": 2} reveal_type(_.merge(obj, {"a": 1}, {"b": 2, "c": 3}, {"d": 4})) # R: builtins.dict[builtins.str, builtins.int] @pytest.mark.mypy_testing def test_mypy_merge_with() -> None: cbk = lambda obj_val, src_val: obj_val + src_val obj1 = {"a": [1], "b": [2]} obj2 = {"a": [3], "b": [4]} reveal_type(_.merge_with(obj1, obj2, cbk)) # R: Any @pytest.mark.mypy_testing def test_mypy_omit() -> None: reveal_type(_.omit({"a": 1, "b": 2, "c": 3}, "b", "c")) # R: builtins.dict[builtins.str, builtins.int] reveal_type(_.omit({"a": 1, "b": 2, "c": 3}, ["a", "c"])) # R: builtins.dict[builtins.str, builtins.int] reveal_type(_.omit([1, 2, 3, 4], 0, 3)) # R: builtins.dict[builtins.int, builtins.int] reveal_type(_.omit({"a": {"b": {"c": "d"}}}, "a.b.c")) # R: builtins.dict[builtins.str, builtins.dict[builtins.str, builtins.dict[builtins.str, builtins.str]]] reveal_type(_.omit(MyClass(), "x")) # R: builtins.dict[Any, Any] @pytest.mark.mypy_testing def test_mypy_omit_by() -> None: def is_int(v: t.Union[str, int]) -> bool: return isinstance(v, int) obj: t.Dict[str, t.Union[str, int]] = {"a": 1, "b": "2", "c": 3} reveal_type(_.omit_by(obj, is_int)) # R: builtins.dict[builtins.str, Union[builtins.str, builtins.int]] reveal_type(_.omit_by([1, 2, 3, 4], is_int)) # R: builtins.dict[builtins.int, builtins.int] reveal_type(_.omit_by(MyClass(), is_int)) # R: builtins.dict[Any, Any] @pytest.mark.mypy_testing def test_mypy_parse_int() -> None: reveal_type(_.parse_int("5")) # R: Union[builtins.int, None] reveal_type(_.parse_int("12", 8)) # R: Union[builtins.int, None] @pytest.mark.mypy_testing def test_mypy_pick() -> None: 
reveal_type(_.pick({"a": 1, "b": 2, "c": 3}, "a", "b")) # R: builtins.dict[builtins.str, builtins.int] reveal_type(_.pick(MyClass(), "x")) # R: builtins.dict[Any, Any] @pytest.mark.mypy_testing def test_mypy_pick_by() -> None: def is_int(v: t.Union[int, str]) -> bool: return isinstance(v, int) obj: t.Dict[str, t.Union[int, str]] = {"a": 1, "b": "2", "c": 3} reveal_type(_.pick_by(obj, is_int)) # R: builtins.dict[builtins.str, Union[builtins.int, builtins.str]] reveal_type(_.pick(MyClass(), lambda v: isinstance(v, int))) # R: builtins.dict[Any, Any] @pytest.mark.mypy_testing def test_mypy_rename_keys() -> None: reveal_type(_.rename_keys({"a": 1, "b": 2, "c": 3}, {"a": "A", "b": "B"})) # R: builtins.dict[builtins.str, builtins.int] @pytest.mark.mypy_testing def test_mypy_set_() -> None: reveal_type(_.set_({}, "a.b.c", 1)) # R: builtins.dict[Never, Never] reveal_type(_.set_(MyClass(), "x", 10)) # R: tests.pytest_mypy_testing.test_objects.MyClass @pytest.mark.mypy_testing def test_mypy_set_with() -> None: reveal_type(_.set_with({}, "[0][1]", "a", lambda: {})) # R: builtins.dict[Never, Never] reveal_type(_.set_with(MyClass(), "x", lambda: 10)) # R: tests.pytest_mypy_testing.test_objects.MyClass @pytest.mark.mypy_testing def test_mypy_to_boolean() -> None: reveal_type(_.to_boolean("true")) # R: Union[builtins.bool, None] @pytest.mark.mypy_testing def test_mypy_to_dict() -> None: obj = {"a": 1, "b": 2} reveal_type(_.to_dict(obj)) # R: builtins.dict[builtins.str, builtins.int] reveal_type(_.to_dict(MyClass())) # R: builtins.dict[Any, Any] reveal_type(_.to_dict([1, 2, 3, 4])) # R: builtins.dict[builtins.int, builtins.int] reveal_type(_.to_dict([(1, 2), (3, 4)])) # R: builtins.dict[builtins.int, Tuple[builtins.int, builtins.int]] @pytest.mark.mypy_testing def test_mypy_to_integer() -> None: reveal_type(_.to_integer(3.2)) # R: builtins.int reveal_type(_.to_integer("3.2")) # R: builtins.int reveal_type(_.to_integer("invalid")) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_to_list() -> None: reveal_type(_.to_list({"a": 1, "b": 2, "c": 3})) # R: builtins.list[builtins.int] reveal_type(_.to_list((1, 2, 3, 4))) # R: builtins.list[builtins.int] reveal_type(_.to_list(1)) # R: builtins.list[builtins.int] reveal_type(_.to_list([1])) # R: builtins.list[builtins.int] reveal_type(_.to_list(a for a in [1, 2, 3])) # R: builtins.list[builtins.int] reveal_type(_.to_list("cat")) # R: builtins.list[builtins.str] reveal_type(_.to_list("cat", split_strings=False)) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_to_number() -> None: reveal_type(_.to_number("1234.5678")) # R: Union[builtins.float, None] reveal_type(_.to_number("1234.5678", 4)) # R: Union[builtins.float, None] reveal_type(_.to_number(1, 2)) # R: Union[builtins.float, None] @pytest.mark.mypy_testing def test_mypy_to_pairs() -> None: reveal_type(_.to_pairs([1, 2, 3, 4])) # R: builtins.list[Tuple[builtins.int, builtins.int]] reveal_type(_.to_pairs({"a": 1})) # R: builtins.list[Tuple[builtins.str, builtins.int]] reveal_type(_.to_pairs(MyClass())) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_to_string() -> None: reveal_type(_.to_string(1)) # R: builtins.str reveal_type(_.to_string(None)) # R: builtins.str reveal_type(_.to_string([1, 2, 3])) # R: builtins.str reveal_type(_.to_string("a")) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_transform() -> None: def build_list(acc: t.List[t.Tuple[int, int]], v: int, k: int) -> None: return acc.append((k, v)) base_list: t.List[t.Tuple[int, int]] = [] 
reveal_type(_.transform([1, 2, 3, 4], build_list, base_list)) # R: builtins.list[Tuple[builtins.int, builtins.int]] @pytest.mark.mypy_testing def test_mypy_update() -> None: reveal_type(_.update({}, ["a", "b"], lambda value: value)) # R: builtins.dict[Any, Any] reveal_type(_.update([], [0, 0], lambda value: 1)) # R: builtins.list[Any] reveal_type(_.update(MyClass(), "x", lambda value: 10)) # R: tests.pytest_mypy_testing.test_objects.MyClass @pytest.mark.mypy_testing def test_mypy_update_with() -> None: reveal_type(_.update_with({}, "[0][1]", lambda x: "a", lambda x: {})) # R: builtins.dict[Any, Any] reveal_type(_.update_with([], [0, 0], lambda x: 1, lambda x: [])) # R: builtins.list[Any] reveal_type(_.update_with(MyClass(), "lst.0", lambda value: 10, lambda x: [])) # R: tests.pytest_mypy_testing.test_objects.MyClass @pytest.mark.mypy_testing def test_mypy_unset() -> None: reveal_type(_.unset({"a": [{"b": {"c": 7}}]}, "a[0].b.c")) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_values() -> None: reveal_type(_.values({"a": "a", "b": "b", "c": "c"})) # R: builtins.list[builtins.str] reveal_type(_.values({"a": 1, "b": 2, "c": 3})) # R: builtins.list[builtins.int] reveal_type(_.values([2, 4, 6, 8])) # R: builtins.list[builtins.int] reveal_type(_.values(MyClass())) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_apply() -> None: reveal_type(_.apply("1", lambda x: int(x))) # R: builtins.int reveal_type(_.apply(1, lambda x: x + 1)) # R: builtins.int reveal_type(_.apply("hello", lambda x: x.upper())) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_apply_if() -> None: reveal_type(_.apply_if("5", lambda x: int(x), lambda x: x.isdecimal())) # R: Union[builtins.str, builtins.int] @pytest.mark.mypy_testing def test_mypy_apply_if_not_none() -> None: reveal_type(_.apply_if_not_none(1, lambda x: x + 1)) # R: Union[builtins.int, None] reveal_type(_.apply_if_not_none(None, lambda x: x + 1)) # R: Union[builtins.int, None] reveal_type(_.apply_if_not_none("hello", lambda x: x.upper())) # R: Union[builtins.str, None] @pytest.mark.mypy_testing def test_mypy_apply_catch() -> None: reveal_type(_.apply_catch(5, lambda x: x / 0, [ZeroDivisionError])) # R: Union[builtins.int, builtins.float] reveal_type(_.apply_catch(5, lambda x: x / 0, [ZeroDivisionError], "error")) # R: Union[builtins.float, builtins.str] pydash-8.0.3/tests/pytest_mypy_testing/test_predicates.py000066400000000000000000000250421464745015500240710ustar00rootroot00000000000000import typing as t import pytest import pydash as _ @pytest.mark.mypy_testing def test_mypy_eq() -> None: reveal_type(_.eq(None, None)) # R: builtins.bool reveal_type(_.eq(None, '')) # R: builtins.bool reveal_type(_.eq('a', 'a')) # R: builtins.bool reveal_type(_.eq(1, str(1))) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_gt() -> None: reveal_type(_.gt(5, 3)) # R: builtins.bool # _.gt({}, {}) # E: Argument 1 to "gt" has incompatible type "Dict[, ]"; expected "SupportsDunderGT[Dict[, ]]" @pytest.mark.mypy_testing def test_mypy_gte() -> None: reveal_type(_.gte(5, 3)) # R: builtins.bool # _.gte({}, {}) # E: Argument 1 to "gte" has incompatible type "Dict[, ]"; expected "SupportsDunderGE[Dict[, ]]" @pytest.mark.mypy_testing def test_mypy_lt() -> None: reveal_type(_.lt(5, 3)) # R: builtins.bool # _.lt({}, {}) # E: Argument 1 to "lt" has incompatible type "Dict[, ]"; expected "SupportsDunderLT[Dict[, ]]" @pytest.mark.mypy_testing def test_mypy_lte() -> None: reveal_type(_.lte(5, 3)) # R: builtins.bool # _.lte({}, {}) # E: Argument 1 to 
"lte" has incompatible type "Dict[, ]"; expected "SupportsDunderLE[Dict[, ]]" @pytest.mark.mypy_testing def test_mypy_in_range() -> None: reveal_type(_.in_range(4, 2)) # R: builtins.bool reveal_type(_.in_range(3, 1, 2)) # R: builtins.bool reveal_type(_.in_range(3.5, 2.5)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_associative() -> None: reveal_type(_.is_associative([])) # R: builtins.bool reveal_type(_.is_associative({})) # R: builtins.bool reveal_type(_.is_associative(1)) # R: builtins.bool reveal_type(_.is_associative(True)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_blank() -> None: reveal_type(_.is_blank('')) # R: builtins.bool reveal_type(_.is_blank(' \r\n ')) # R: builtins.bool reveal_type(_.is_blank(False)) # R: builtins.bool x: t.Any = ... if _.is_blank(x): reveal_type(x) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_is_boolean() -> None: reveal_type(_.is_boolean(True)) # R: builtins.bool reveal_type(_.is_boolean(False)) # R: builtins.bool reveal_type(_.is_boolean(0)) # R: builtins.bool x: t.Any = ... if _.is_boolean(x): reveal_type(x) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_builtin() -> None: reveal_type(_.is_builtin(1)) # R: builtins.bool reveal_type(_.is_builtin(list)) # R: builtins.bool reveal_type(_.is_builtin('foo')) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_date() -> None: import datetime reveal_type(_.is_date(datetime.date.today())) # R: builtins.bool reveal_type(_.is_date(datetime.datetime.today())) # R: builtins.bool reveal_type(_.is_date('2014-01-01')) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_decreasing() -> None: reveal_type(_.is_decreasing([5, 4, 4, 3])) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_dict() -> None: reveal_type(_.is_dict({})) # R: builtins.bool reveal_type(_.is_dict([])) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_empty() -> None: reveal_type(_.is_empty(0)) # R: builtins.bool reveal_type(_.is_empty(1)) # R: builtins.bool reveal_type(_.is_empty(True)) # R: builtins.bool reveal_type(_.is_empty('foo')) # R: builtins.bool reveal_type(_.is_empty(None)) # R: builtins.bool reveal_type(_.is_empty({})) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_equal() -> None: reveal_type(_.is_equal([1, 2, 3], [1, 2, 3])) # R: builtins.bool reveal_type(_.is_equal('a', 'A')) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_equal_with() -> None: reveal_type(_.is_equal_with([1, 2, 3], [1, 2, 3], None)) # R: builtins.bool reveal_type(_.is_equal_with('a', 'A', None)) # R: builtins.bool reveal_type(_.is_equal_with('a', 'A', lambda a, b: a.lower() == b.lower())) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_error() -> None: reveal_type(_.is_error(Exception())) # R: builtins.bool reveal_type(_.is_error(Exception)) # R: builtins.bool reveal_type(_.is_error(None)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_even() -> None: reveal_type(_.is_even(2)) # R: builtins.bool reveal_type(_.is_even(False)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_float() -> None: reveal_type(_.is_float(1.0)) # R: builtins.bool reveal_type(_.is_float(1)) # R: builtins.bool x: t.Any = ... 
if _.is_float(x): reveal_type(x) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_is_function() -> None: reveal_type(_.is_function(list)) # R: builtins.bool reveal_type(_.is_function(lambda: True)) # R: builtins.bool reveal_type(_.is_function(1)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_increasing() -> None: reveal_type(_.is_increasing([1, 3, 5])) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_indexed() -> None: reveal_type(_.is_indexed('')) # R: builtins.bool reveal_type(_.is_indexed([])) # R: builtins.bool reveal_type(_.is_indexed(())) # R: builtins.bool reveal_type(_.is_indexed({})) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_instance_of() -> None: reveal_type(_.is_instance_of({}, dict)) # R: builtins.bool reveal_type(_.is_instance_of({}, list)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_integer() -> None: reveal_type(_.is_integer(1)) # R: builtins.bool reveal_type(_.is_integer(1.0)) # R: builtins.bool reveal_type(_.is_integer(True)) # R: builtins.bool x: t.Any = ... if _.is_integer(x): reveal_type(x) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_is_iterable() -> None: reveal_type(_.is_iterable([])) # R: builtins.bool reveal_type(_.is_iterable({})) # R: builtins.bool reveal_type(_.is_iterable(())) # R: builtins.bool reveal_type(_.is_iterable(5)) # R: builtins.bool reveal_type(_.is_iterable(True)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_json() -> None: reveal_type(_.is_json({})) # R: builtins.bool reveal_type(_.is_json('{}')) # R: builtins.bool reveal_type(_.is_json({"hello": 1, "world": 2})) # R: builtins.bool reveal_type(_.is_json('{"hello": 1, "world": 2}')) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_list() -> None: reveal_type(_.is_list([])) # R: builtins.bool reveal_type(_.is_list({})) # R: builtins.bool reveal_type(_.is_list(())) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_match() -> None: reveal_type(_.is_match({'a': 1, 'b': 2}, {'b': 2})) # R: builtins.bool reveal_type(_.is_match({'a': 1, 'b': 2}, {'b': 3})) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_match_with() -> None: is_greeting = lambda val: val in ('hello', 'hi') customizer = lambda ov, sv: is_greeting(ov) and is_greeting(sv) obj = {'greeting': 'hello'} src = {'greeting': 'hi'} reveal_type(_.is_match_with(obj, src, customizer)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_monotone() -> None: import operator reveal_type(_.is_monotone([1, 1, 2, 3], operator.le)) # R: builtins.bool reveal_type(_.is_monotone([1, 1, 2, 3], operator.lt)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_nan() -> None: reveal_type(_.is_nan('a')) # R: builtins.bool reveal_type(_.is_nan(1)) # R: builtins.bool reveal_type(_.is_nan(1.0)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_negative() -> None: reveal_type(_.is_negative(-1)) # R: builtins.bool reveal_type(_.is_negative(0)) # R: builtins.bool reveal_type(_.is_negative(1)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_none() -> None: reveal_type(_.is_none(None)) # R: builtins.bool reveal_type(_.is_none(False)) # R: builtins.bool x: t.Any = ... 
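    # is_none appears to be typed as a TypeGuard, so mypy narrows x from Any to None inside the branch below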
if _.is_none(x): reveal_type(x) # R: None @pytest.mark.mypy_testing def test_mypy_is_number() -> None: reveal_type(_.is_number(1)) # R: builtins.bool reveal_type(_.is_number(1.0)) # R: builtins.bool reveal_type(_.is_number('a')) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_object() -> None: reveal_type(_.is_object([])) # R: builtins.bool reveal_type(_.is_object({})) # R: builtins.bool reveal_type(_.is_object(())) # R: builtins.bool reveal_type(_.is_object(1)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_odd() -> None: reveal_type(_.is_odd(3)) # R: builtins.bool reveal_type(_.is_odd('a')) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_positive() -> None: reveal_type(_.is_positive(1)) # R: builtins.bool reveal_type(_.is_positive(0)) # R: builtins.bool reveal_type(_.is_positive(-1)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_reg_exp() -> None: import re reveal_type(_.is_reg_exp(re.compile(''))) # R: builtins.bool reveal_type(_.is_reg_exp('')) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_set() -> None: reveal_type(_.is_set(set([1, 2]))) # R: builtins.bool reveal_type(_.is_set([1, 2, 3])) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_strictly_decreasing() -> None: reveal_type(_.is_strictly_decreasing([4, 3, 2, 1])) # R: builtins.bool reveal_type(_.is_strictly_decreasing([4, 4, 2, 1])) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_strictly_increasing() -> None: reveal_type(_.is_strictly_increasing([1, 2, 3, 4])) # R: builtins.bool reveal_type(_.is_strictly_increasing([1, 1, 3, 4])) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_string() -> None: reveal_type(_.is_string('')) # R: builtins.bool reveal_type(_.is_string(1)) # R: builtins.bool x: t.Any = ... if _.is_string(x): reveal_type(x) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_is_tuple() -> None: reveal_type(_.is_tuple(())) # R: builtins.bool reveal_type(_.is_tuple({})) # R: builtins.bool reveal_type(_.is_tuple([])) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_is_zero() -> None: reveal_type(_.is_zero(0)) # R: builtins.bool reveal_type(_.is_zero(1)) # R: builtins.bool x: t.Any = ... 
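    # the same TypeGuard-style narrowing presumably applies to is_zero: mypy treats x as builtins.int inside the branch below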
if _.is_zero(x): reveal_type(x) # R: builtins.int pydash-8.0.3/tests/pytest_mypy_testing/test_strings.py000066400000000000000000000317561464745015500234500ustar00rootroot00000000000000import pytest import pydash as _ @pytest.mark.mypy_testing def test_mypy_camel_case() -> None: reveal_type(_.camel_case('FOO BAR_bAz')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_capitalize() -> None: reveal_type(_.capitalize('once upon a TIME')) # R: builtins.str reveal_type(_.capitalize('once upon a TIME', False)) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_chars() -> None: reveal_type(_.chars('onetwo')) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_chop() -> None: reveal_type(_.chop('abcdefg', 3)) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_chop_right() -> None: reveal_type(_.chop_right('abcdefg', 3)) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_clean() -> None: reveal_type(_.clean('a b c d')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_count_substr() -> None: reveal_type(_.count_substr('aabbccddaabbccdd', 'bc')) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_deburr() -> None: reveal_type(_.deburr('déjà vu')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_decapitalize() -> None: reveal_type(_.decapitalize('FOO BAR')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_ends_with() -> None: reveal_type(_.ends_with('abc def', 'def')) # R: builtins.bool reveal_type(_.ends_with('abc def', 4)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_ensure_ends_with() -> None: reveal_type(_.ensure_ends_with('foo bar', '!')) # R: builtins.str reveal_type(_.ensure_ends_with('foo bar!', '!')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_ensure_starts_with() -> None: reveal_type(_.ensure_starts_with('foo bar', 'Oh my! ')) # R: builtins.str reveal_type(_.ensure_starts_with('Oh my! foo bar', 'Oh my! 
')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_escape() -> None: reveal_type(_.escape('"1 > 2 && 3 < 4"')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_escape_reg_exp() -> None: reveal_type(_.escape_reg_exp('[()]')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_has_substr() -> None: reveal_type(_.has_substr('abcdef', 'bc')) # R: builtins.bool reveal_type(_.has_substr('abcdef', 'bb')) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_human_case() -> None: reveal_type(_.human_case('abc-def_hij lmn')) # R: builtins.str reveal_type(_.human_case('user_id')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_insert_substr() -> None: reveal_type(_.insert_substr('abcdef', 3, '--')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_join() -> None: reveal_type(_.join(['a', 'b', 'c'])) # R: builtins.str reveal_type(_.join([1, 2, 3, 4], '&')) # R: builtins.str reveal_type(_.join('abcdef', '-')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_kebab_case() -> None: reveal_type(_.kebab_case('a b c_d-e!f')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_lines() -> None: reveal_type(_.lines('a\nb\r\nc')) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_lower_case() -> None: reveal_type(_.lower_case('fooBar')) # R: builtins.str reveal_type(_.lower_case('--foo-Bar--')) # R: builtins.str reveal_type(_.lower_case('/?*Foo10/;"B*Ar')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_lower_first() -> None: reveal_type(_.lower_first('FRED')) # R: builtins.str reveal_type(_.lower_first('Foo Bar')) # R: builtins.str reveal_type(_.lower_first('1foobar')) # R: builtins.str reveal_type(_.lower_first(';foobar')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_number_format() -> None: reveal_type(_.number_format(1234.5678)) # R: builtins.str reveal_type(_.number_format(1234.5678, 2, ',', '.')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_pad() -> None: reveal_type(_.pad('abc', 5)) # R: builtins.str reveal_type(_.pad('abc', 6, 'x')) # R: builtins.str reveal_type(_.pad('abc', 5, '...')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_pad_end() -> None: reveal_type(_.pad_end('abc', 5)) # R: builtins.str reveal_type(_.pad_end('abc', 5, '.')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_pad_start() -> None: reveal_type(_.pad_start('abc', 5)) # R: builtins.str reveal_type(_.pad_start('abc', 5, '.')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_pascal_case() -> None: reveal_type(_.pascal_case('FOO BAR_bAz')) # R: builtins.str reveal_type(_.pascal_case('FOO BAR_bAz', False)) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_predecessor() -> None: reveal_type(_.predecessor('c')) # R: builtins.str reveal_type(_.predecessor('C')) # R: builtins.str reveal_type(_.predecessor('3')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_prune() -> None: reveal_type(_.prune('Fe fi fo fum', 5)) # R: builtins.str reveal_type(_.prune('Fe fi fo fum', 6)) # R: builtins.str reveal_type(_.prune('Fe fi fo fum', 7)) # R: builtins.str reveal_type(_.prune('Fe fi fo fum', 8, ',,,')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_quote() -> None: reveal_type(_.quote('To be or not to be')) # R: builtins.str reveal_type(_.quote('To be or not to be', "'")) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_reg_exp_js_match() -> None: reveal_type(_.reg_exp_js_match('aaBBcc', '/bb/')) # R: builtins.list[builtins.str] 
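    # the '/pattern/flags' argument mimics a JavaScript regex literal; the trailing 'i' in the calls below requests a case-insensitive match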
reveal_type(_.reg_exp_js_match('aaBBcc', '/bb/i')) # R: builtins.list[builtins.str] reveal_type(_.reg_exp_js_match('aaBBccbb', '/bb/i')) # R: builtins.list[builtins.str] reveal_type(_.reg_exp_js_match('aaBBccbb', '/bb/gi')) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_reg_exp_js_replace() -> None: reveal_type(_.reg_exp_js_replace('aaBBcc', '/bb/', 'X')) # R: builtins.str reveal_type(_.reg_exp_js_replace('aaBBcc', '/bb/i', 'X')) # R: builtins.str reveal_type(_.reg_exp_js_replace('aaBBccbb', '/bb/i', 'X')) # R: builtins.str reveal_type(_.reg_exp_js_replace('aaBBccbb', '/bb/gi', 'X')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_reg_exp_replace() -> None: reveal_type(_.reg_exp_replace('aabbcc', 'b', 'X')) # R: builtins.str reveal_type(_.reg_exp_replace('aabbcc', 'B', 'X', ignore_case=True)) # R: builtins.str reveal_type(_.reg_exp_replace('aabbcc', 'b', 'X', count=1)) # R: builtins.str reveal_type(_.reg_exp_replace('aabbcc', '[ab]', 'X')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_repeat() -> None: reveal_type(_.repeat('.', 5)) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_replace() -> None: reveal_type(_.replace('aabbcc', 'b', 'X')) # R: builtins.str reveal_type(_.replace('aabbcc', 'B', 'X', ignore_case=True)) # R: builtins.str reveal_type(_.replace('aabbcc', 'b', 'X', count=1)) # R: builtins.str reveal_type(_.replace('aabbcc', '[ab]', 'X')) # R: builtins.str reveal_type(_.replace('aabbcc', '[ab]', 'X', escape=False)) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_replace_end() -> None: reveal_type(_.replace_end('aabbcc', 'b', 'X')) # R: builtins.str reveal_type(_.replace_end('aabbcc', 'c', 'X')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_replace_start() -> None: reveal_type(_.replace_start('aabbcc', 'b', 'X')) # R: builtins.str reveal_type(_.replace_start('aabbcc', 'a', 'X')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_separator_case() -> None: reveal_type(_.separator_case('a!!b___c.d', '-')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_series_phrase() -> None: reveal_type(_.series_phrase(['apples', 'bananas', 'peaches'])) # R: builtins.str reveal_type(_.series_phrase(['apples', 'bananas', 'peaches'], serial=True)) # R: builtins.str reveal_type(_.series_phrase(['apples', 'bananas', 'peaches'], '; ', ', or ')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_series_phrase_serial() -> None: reveal_type(_.series_phrase_serial(['apples', 'bananas', 'peaches'])) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_slugify() -> None: reveal_type(_.slugify('This is a slug.')) # R: builtins.str reveal_type(_.slugify('This is a slug.', '+')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_snake_case() -> None: reveal_type(_.snake_case('This is Snake Case!')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_split() -> None: reveal_type(_.split('one potato, two potatoes, three potatoes, four!')) # R: builtins.list[builtins.str] reveal_type(_.split('one potato, two potatoes, three potatoes, four!', ',')) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_start_case() -> None: reveal_type(_.start_case("fooBar")) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_starts_with() -> None: reveal_type(_.starts_with('abcdef', 'a')) # R: builtins.bool reveal_type(_.starts_with('abcdef', 'b')) # R: builtins.bool reveal_type(_.starts_with('abcdef', 'a', 1)) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_strip_tags() -> 
None: reveal_type(_.strip_tags('Some link')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_substr_left() -> None: reveal_type(_.substr_left('abcdefcdg', 'cd')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_substr_left_end() -> None: reveal_type(_.substr_left_end('abcdefcdg', 'cd')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_substr_right() -> None: reveal_type(_.substr_right('abcdefcdg', 'cd')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_substr_right_end() -> None: reveal_type(_.substr_right_end('abcdefcdg', 'cd')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_successor() -> None: reveal_type(_.successor('b')) # R: builtins.str reveal_type(_.successor('B')) # R: builtins.str reveal_type(_.successor('2')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_surround() -> None: reveal_type(_.surround('abc', '"')) # R: builtins.str reveal_type(_.surround('abc', '!')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_swap_case() -> None: reveal_type(_.swap_case('aBcDeF')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_title_case() -> None: reveal_type(_.title_case("bob's shop")) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_to_lower() -> None: reveal_type(_.to_lower('--Foo-Bar--')) # R: builtins.str reveal_type(_.to_lower('fooBar')) # R: builtins.str reveal_type(_.to_lower('__FOO_BAR__')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_to_upper() -> None: reveal_type(_.to_upper('--Foo-Bar--')) # R: builtins.str reveal_type(_.to_upper('fooBar')) # R: builtins.str reveal_type(_.to_upper('__FOO_BAR__')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_trim() -> None: reveal_type(_.trim(' abc efg\r\n ')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_trim_end() -> None: reveal_type(_.trim_end(' abc efg\r\n ')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_trim_start() -> None: reveal_type(_.trim_start(' abc efg\r\n ')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_truncate() -> None: reveal_type(_.truncate('hello world', 5)) # R: builtins.str reveal_type(_.truncate('hello world', 5, '..')) # R: builtins.str reveal_type(_.truncate('hello world', 10)) # R: builtins.str reveal_type(_.truncate('hello world', 10, separator=' ')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_unescape() -> None: reveal_type(_.unescape('"1 > 2 && 3 < 4"')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_upper_case() -> None: reveal_type(_.upper_case('--foo-bar--')) # R: builtins.str reveal_type(_.upper_case('fooBar')) # R: builtins.str reveal_type(_.upper_case('/?*Foo10/;"B*Ar')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_upper_first() -> None: reveal_type(_.upper_first('fred')) # R: builtins.str reveal_type(_.upper_first('foo bar')) # R: builtins.str reveal_type(_.upper_first('1foobar')) # R: builtins.str reveal_type(_.upper_first(';foobar')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_unquote() -> None: reveal_type(_.unquote('"abc"')) # R: builtins.str reveal_type(_.unquote('"abc"', '#')) # R: builtins.str reveal_type(_.unquote('#abc', '#')) # R: builtins.str reveal_type(_.unquote('#abc#', '#')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_url() -> None: reveal_type(_.url('a', 'b', ['c', 'd'], '/', q='X', y='Z')) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_words() -> None: reveal_type(_.words('a b, c; d-e')) # R: builtins.list[builtins.str] reveal_type(_.words('fred, barney, & pebbles', '/[^, 
]+/g')) # R: builtins.list[builtins.str] pydash-8.0.3/tests/pytest_mypy_testing/test_utilities.py000066400000000000000000000206311464745015500237600ustar00rootroot00000000000000import typing as t import pytest import pydash as _ @pytest.mark.mypy_testing def test_mypy_attempt() -> None: def divide_by_zero(n: int) -> float: return n/0 reveal_type(_.attempt(divide_by_zero, 1)) # R: Union[builtins.float, builtins.Exception] @pytest.mark.mypy_testing def test_mypy_cond() -> None: def is_1(n: int) -> bool: return n == 1 func = _.cond( [ (is_1, _.constant("matches 1")), ] ) reveal_type(func) # R: def (*Any, **Any) -> builtins.str @pytest.mark.mypy_testing def test_mypy_conforms() -> None: def higher_than_1(n: int) -> bool: return n > 1 def is_0(n: int) -> bool: return n == 0 reveal_type(_.conforms({'b': higher_than_1})) # R: def (builtins.dict[builtins.str, builtins.int]) -> builtins.bool reveal_type(_.conforms([higher_than_1, is_0])) # R: def (builtins.list[builtins.int]) -> builtins.bool @pytest.mark.mypy_testing def test_mypy_conforms_to() -> None: reveal_type(_.conforms_to({'b': 2}, {'b': lambda n: n > 1})) # R: builtins.bool reveal_type(_.conforms_to({'b': 0}, {'b': lambda n: n > 1})) # R: builtins.bool reveal_type(_.conforms_to([2, 0], [lambda n: n > 1, lambda n: n == 0])) # R: builtins.bool reveal_type(_.conforms_to([0, 0], [lambda n: n > 1, lambda n: n == 0])) # R: builtins.bool @pytest.mark.mypy_testing def test_mypy_constant() -> None: reveal_type(_.constant(3.14)) # R: def (*Any, **Any) -> builtins.float reveal_type(_.constant("hello")) # R: def (*Any, **Any) -> builtins.str @pytest.mark.mypy_testing def test_mypy_default_to() -> None: reveal_type(_.default_to(1, 10)) # R: builtins.int reveal_type(_.default_to(None, 10)) # R: builtins.int reveal_type(_.default_to(None, None)) # R: None @pytest.mark.mypy_testing def test_mypy_default_to_any() -> None: n1, n2, n3 = 1, 10, 20 reveal_type(_.default_to_any(n1, n2, n3)) # R: builtins.int reveal_type(_.default_to_any(None, n2, n3)) # R: builtins.int reveal_type(_.default_to_any(None, None, n3)) # R: builtins.int reveal_type(_.default_to_any(None, None, None)) # R: None @pytest.mark.mypy_testing def test_mypy_identity() -> None: reveal_type(_.identity(1)) # R: builtins.int reveal_type(_.identity(1, 2, 3)) # R: builtins.int reveal_type(_.identity()) # R: None @pytest.mark.mypy_testing def test_mypy_iteratee() -> None: def add(x: int, y: int) -> int: return x + y reveal_type(_.iteratee('data')) # R: def (*Any, **Any) -> Any reveal_type(_.iteratee({'active': True})) # R: def (*Any, **Any) -> Any reveal_type(_.iteratee(add)) # R: def (x: builtins.int, y: builtins.int) -> builtins.int @pytest.mark.mypy_testing def test_mypy_matches() -> None: reveal_type(_.matches({'a': {'b': 2}})) # R: def (Any) -> builtins.bool @pytest.mark.mypy_testing def test_mypy_matches_property() -> None: reveal_type(_.matches_property('a', 1)) # R: def (Any) -> builtins.bool reveal_type(_.matches_property(0, 1)) # R: def (Any) -> builtins.bool reveal_type(_.matches_property('a', 2)) # R: def (Any) -> builtins.bool @pytest.mark.mypy_testing def test_mypy_memoize() -> None: def add(x: int, y: int) -> int: return x + y def mul(x: int, y: int) -> int: return x * y memoized_add = _.memoize(add) reveal_type(memoized_add) # R: pydash.utilities.MemoizedFunc[[x: builtins.int, y: builtins.int], builtins.int, builtins.str] reveal_type(memoized_add.cache) # R: builtins.dict[builtins.str, builtins.int] memoized_add_resolv = _.memoize(add, mul) reveal_type(memoized_add_resolv) # R: 
pydash.utilities.MemoizedFunc[[x: builtins.int, y: builtins.int], builtins.int, builtins.int] reveal_type(memoized_add_resolv.cache) # R: builtins.dict[builtins.int, builtins.int] @pytest.mark.mypy_testing def test_mypy_method() -> None: obj = {'a': {'b': [None, lambda x: x]}} reveal_type(_.method('a.b.1')) # R: def (*Any, **Any) -> Any @pytest.mark.mypy_testing def test_mypy_method_of() -> None: obj = {'a': {'b': [None, lambda x: x]}} reveal_type(_.method_of(obj)) # R: def (*Any, **Any) -> Any @pytest.mark.mypy_testing def test_mypy_noop() -> None: reveal_type(_.noop(5, "hello", {})) # R: None @pytest.mark.mypy_testing def test_mypy_nth_arg() -> None: reveal_type(_.nth_arg(1)) # R: def (*Any, **Any) -> Any @pytest.mark.mypy_testing def test_mypy_now() -> None: reveal_type(_.now()) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_over() -> None: def first(*args: int) -> int: return args[0] def second(*args: int) -> int: return args[1] reveal_type(_.over([first, second])) # R: def (*args: builtins.int) -> builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_over_every() -> None: def is_two(x: int) -> bool: return x == 2 def is_one(x: int) -> bool: return x == 1 reveal_type(_.over_every([is_one, is_two])) # R: def (x: builtins.int) -> builtins.bool @pytest.mark.mypy_testing def test_mypy_over_some() -> None: def is_two(x: int) -> bool: return x == 2 def is_one(x: int) -> bool: return x == 1 reveal_type(_.over_some([is_one, is_two])) # R: def (x: builtins.int) -> builtins.bool @pytest.mark.mypy_testing def test_mypy_property_() -> None: reveal_type(_.property_('data')) # R: def (Any) -> Any reveal_type(_.property_(0)) # R: def (Any) -> Any @pytest.mark.mypy_testing def test_mypy_properties() -> None: reveal_type(_.properties('a', 'b', ['c', 'd', 'e'])) # R: def (Any) -> Any @pytest.mark.mypy_testing def test_mypy_property_of() -> None: reveal_type(_.property_of({'a': 1, 'b': 2, 'c': 3})) # R: def (Union[typing.Hashable, builtins.list[typing.Hashable]]) -> Any @pytest.mark.mypy_testing def test_mypy_random() -> None: reveal_type(_.random()) # R: builtins.int reveal_type(_.random(5, 10)) # R: builtins.int reveal_type(_.random(floating=True)) # R: builtins.float reveal_type(_.random(5.4, 10)) # R: builtins.float reveal_type(_.random(5, 10.0)) # R: builtins.float reveal_type(_.random(5.5, 10.0)) # R: builtins.float @pytest.mark.mypy_testing def test_mypy_range_() -> None: reveal_type(list(_.range_(5))) # R: builtins.list[builtins.int] reveal_type(list(_.range_(1, 4))) # R: builtins.list[builtins.int] reveal_type(list(_.range_(0, 6, 2))) # R: builtins.list[builtins.int] reveal_type(list(_.range_(4, 1))) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_range_right() -> None: reveal_type(list(_.range_right(5))) # R: builtins.list[builtins.int] reveal_type(list(_.range_right(1, 4))) # R: builtins.list[builtins.int] reveal_type(list(_.range_right(0, 6, 2))) # R: builtins.list[builtins.int] reveal_type(list(_.range_right(4, 1))) # R: builtins.list[builtins.int] @pytest.mark.mypy_testing def test_mypy_result() -> None: reveal_type(_.result({'a': 1, 'b': lambda: 2}, 'a')) # R: Any reveal_type(_.result(None, 'a', None)) # R: None reveal_type(_.result(None, 'a', 5)) # R: builtins.int @pytest.mark.mypy_testing def test_mypy_retry() -> None: def add(x: int, y: int) -> int: return x + y r = _.retry(attempts=3, delay=0) reveal_type(r(add)) # R: def (x: builtins.int, y: builtins.int) -> builtins.int @pytest.mark.mypy_testing def test_mypy_stub_list() -> None: 
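    # stub_list appears to return a fresh empty list on every call, hence the list[Any] type revealed below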
reveal_type(_.stub_list()) # R: builtins.list[Any] @pytest.mark.mypy_testing def test_mypy_stub_dict() -> None: reveal_type(_.stub_dict()) # R: builtins.dict[Any, Any] @pytest.mark.mypy_testing def test_mypy_stub_false() -> None: reveal_type(_.stub_false()) # R: Literal[False] @pytest.mark.mypy_testing def test_mypy_stub_string() -> None: reveal_type(_.stub_string()) # R: builtins.str @pytest.mark.mypy_testing def test_mypy_stub_true() -> None: reveal_type(_.stub_true()) # R: Literal[True] @pytest.mark.mypy_testing def test_mypy_times() -> None: def to_str(x: int) -> str: return str(x) reveal_type(_.times(5)) # R: builtins.list[builtins.int] reveal_type(_.times(5, to_str)) # R: builtins.list[builtins.str] @pytest.mark.mypy_testing def test_mypy_to_path() -> None: reveal_type(_.to_path('a.b.c')) # R: builtins.list[typing.Hashable] @pytest.mark.mypy_testing def test_mypy_unique_id() -> None: reveal_type(_.unique_id()) # R: builtins.str reveal_type(_.unique_id('id_')) # R: builtins.str pydash-8.0.3/tests/test_annotations.py000066400000000000000000000003171464745015500201360ustar00rootroot00000000000000import typing as t import pydash as _ def typed_function(row: int, index: int, col: t.List[t.Any]): return row + 1 def test_annotated_iteratee(): assert _.map_([1, 2], typed_function) == [2, 3] pydash-8.0.3/tests/test_arrays.py000066400000000000000000000571421464745015500171120ustar00rootroot00000000000000import math import pytest import pydash as _ parametrize = pytest.mark.parametrize @parametrize( "case,expected", [ (([1, 2, 3, 4, 5],), [[1], [2], [3], [4], [5]]), (([1, 2, 3, 4, 5], 2), [[1, 2], [3, 4], [5]]), (([1, 2, 3, 4, 5], 3), [[1, 2, 3], [4, 5]]), (([1, 2, 3, 4, 5], 4), [[1, 2, 3, 4], [5]]), (([1, 2, 3, 4, 5], 5), [[1, 2, 3, 4, 5]]), (([1, 2, 3, 4, 5], 6), [[1, 2, 3, 4, 5]]), ], ) def test_chunk(case, expected): assert _.chunk(*case) == expected @parametrize( "case,expected", [([0, 1, 2, 3], [1, 2, 3]), ([True, False, None, True, 1, "foo"], [True, True, 1, "foo"])], ) def test_compact(case, expected): assert _.compact(case) == expected @parametrize( "case,expected", [ ((), []), (([],), []), (([1, 2, 3],), [1, 2, 3]), (([1, 2, 3], [4, 5, 6]), [1, 2, 3, 4, 5, 6]), (([1, 2, 3], [4, 5, 6], [7]), [1, 2, 3, 4, 5, 6, 7]), ((1, [2], 3, 4), [1, 2, 3, 4]), ], ) def test_concat(case, expected): assert _.concat(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4],), [1, 2, 3, 4]), (([1, 2, 3, 4], []), [1, 2, 3, 4]), (([1, 2, 3, 4], [2, 4], [3, 5, 6]), [1]), (([1, 1, 1, 1], [2, 4], [3, 5, 6]), [1, 1, 1, 1]), ], ) def test_difference(case, expected): assert _.difference(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4],), [1, 2, 3, 4]), (([1, 2, 3, 4], []), [1, 2, 3, 4]), (([{"a": 1}, {"a": 2, "b": 2}], [{"a": 1}], "a"), [{"a": 2, "b": 2}]), ], ) def test_difference_by(case, expected): assert _.difference_by(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4],), [1, 2, 3, 4]), (([1, 2, 3, 4], []), [1, 2, 3, 4]), ( ([{"a": 1}, {"a": 2, "b": 2}], [{"a": 1}], lambda item, other: item["a"] == other["a"]), [{"a": 2, "b": 2}], ), ], ) def test_difference_with(case, expected): assert _.difference_with(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5],), [2, 3, 4, 5]), (([1, 2, 3, 4, 5], 1), [2, 3, 4, 5]), (([1, 2, 3, 4, 5], 2), [3, 4, 5]), (([1, 2, 3, 4, 5], 5), []), (([1, 2, 3, 4, 5], 6), []), ], ) def test_drop(case, expected): assert _.drop(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5], lambda item: item < 3), [3, 
4, 5]), ], ) def test_drop_while(case, expected): assert _.drop_while(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5],), [1, 2, 3, 4]), (([1, 2, 3, 4, 5], 1), [1, 2, 3, 4]), (([1, 2, 3, 4, 5], 2), [1, 2, 3]), (([1, 2, 3, 4, 5], 5), []), (([1, 2, 3, 4, 5], 6), []), ], ) def test_drop_right(case, expected): assert _.drop_right(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5], lambda item: item > 3), [1, 2, 3]), ], ) def test_drop_right_while(case, expected): assert _.drop_right_while(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 2, 1, 5, 6, 5, 5, 5],), [2, 1, 5]), ((["A", "b", "C", "a", "B", "c"], lambda letter: letter.lower()), ["a", "B", "c"]), ], ) def test_duplicates(case, expected): assert _.duplicates(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5], 0), [0, 0, 0, 0, 0]), (([1, 2, 3, 4, 5], 0, 2), [1, 2, 0, 0, 0]), (([1, 2, 3, 4, 5], 0, 2, 3), [1, 2, 0, 4, 5]), (([1, 2, 3, 4, 5], 0, 0, 5), [0, 0, 0, 0, 0]), (([1, 2, 3, 4, 5], 0, 0, 8), [0, 0, 0, 0, 0]), (([1, 2, 3, 4, 5], 0, 0, -1), [0, 0, 0, 0, 5]), ], ) def test_fill(case, expected): array = case[0] assert _.fill(*case) == expected assert array == expected @parametrize( "case,filter_by,expected", [ (["apple", "banana", "beet"], lambda item: item.startswith("b"), 1), ( [ {"name": "apple", "type": "fruit"}, {"name": "banana", "type": "fruit"}, {"name": "beet", "type": "vegetable"}, ], {"name": "banana"}, 1, ), (["apple", "banana", "beet"], lambda: False, -1), ], ) def test_find_index(case, filter_by, expected): assert _.find_index(case, filter_by) == expected @parametrize( "case,filter_by,expected", [ (["apple", "banana", "beet"], lambda item: item.startswith("b"), 2), ( [ {"name": "apple", "type": "fruit"}, {"name": "banana", "type": "fruit"}, {"name": "beet", "type": "vegetable"}, ], {"type": "fruit"}, 1, ), (["apple", "banana", "beet"], lambda: False, -1), ], ) def test_find_last_index(case, filter_by, expected): assert _.find_last_index(case, filter_by) == expected @parametrize( "case,expected", [ ([1, ["2222"], [3, [[4]]]], [1, "2222", 3, [[4]]]), ], ) def test_flatten(case, expected): assert _.flatten(case) == expected @parametrize( "case,expected", [ ([1, ["2222"], [3, [[4]]]], [1, "2222", 3, 4]), ], ) def test_flatten_deep(case, expected): assert _.flatten_deep(case) == expected @parametrize( "case,expected", [ (([1, ["2222"], [3, [[4]]]],), [1, "2222", 3, [[4]]]), (([1, ["2222"], [3, [[4]]]], 1), [1, "2222", 3, [[4]]]), (([1, ["2222"], [3, [[4]]]], 2), [1, "2222", 3, [4]]), (([1, ["2222"], [3, [[4]]]], 3), [1, "2222", 3, 4]), ], ) def test_flatten_depth(case, expected): assert _.flatten_depth(*case) == expected @parametrize( "case,expected", [ ([["a", 1], ["b", 2]], {"a": 1, "b": 2}), ([["a", 1], ["b", 2], ["c", 3]], {"a": 1, "b": 2, "c": 3}), ], ) def test_from_pairs(case, expected): assert _.from_pairs(case) == expected @parametrize("case,expected", [([1, 2, 3], 1), ([], None)]) def test_head(case, expected): assert _.head(case) == expected @parametrize( "case,value,from_index,expected", [ ([1, 2, 3, 1, 2, 3], 2, 0, 1), ([1, 2, 3, 1, 2, 3], 2, 3, 4), ([1, 1, 2, 2, 3, 3], 2, True, 2), ([1, 1, 2, 2, 3, 3], 4, 0, -1), ([1, 1, 2, 2, 3, 3], 2, 10, -1), ([1, 1, 2, 2, 3, 3], 0, 0, -1), ], ) def test_index_of(case, value, from_index, expected): assert _.index_of(case, value, from_index) == expected @parametrize("case,expected", [([1, 2, 3], [1, 2]), ([1], [])]) def test_initial(case, expected): assert _.initial(case) == expected @parametrize( 
"case,expected", [ (([[10, 20], [30, 40], [50, 60]], [1, 2, 3]), [10, 20, 1, 2, 3, 30, 40, 1, 2, 3, 50, 60]), ( ([[[10, 20]], [[30, 40]], [50, [60]]], [1, 2, 3]), [[10, 20], 1, 2, 3, [30, 40], 1, 2, 3, 50, [60]], ), ], ) def test_intercalate(case, expected): assert _.intercalate(*case) == expected @parametrize( "case,expected", [ (([1, 2], [3, 4]), [1, 3, 2, 4]), (([1, 2], [3, 4], [5, 6]), [1, 3, 5, 2, 4, 6]), (([1, 2], [3, 4, 5], [6]), [1, 3, 6, 2, 4, 5]), (([1, 2, 3], [4], [5, 6]), [1, 4, 5, 2, 6, 3]), ], ) def test_interleave(case, expected): assert _.interleave(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], [101, 2, 1, 10], [2, 1]), [1, 2]), (([1, 1, 2, 2], [1, 1, 2, 2]), [1, 2]), (([1, 2, 3], [4]), []), (([1, 2, 3],), [1, 2, 3]), (([], [101, 2, 1, 10], [2, 1]), []), (([],), []), ], ) def test_intersection(case, expected): assert _.intersection(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], [101, 2, 1, 10], [2, 1], None), [1, 2]), (([1, 2, 3], [4]), []), (([1, 2, 3],), [1, 2, 3]), (([], [101, 2, 1, 10], [2, 1]), []), (([],), []), (([1, 2, 3], [101, 2, 1, 10], [2, 1], lambda a: 1 if a < 10 else 0), [1]), (([{"a": 1}, {"a": 2}, {"a": 3}], [{"a": 2}], "a"), [{"a": 2}]), ], ) def test_intersection_by(case, expected): assert _.intersection_by(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], [101, 2, 1, 10], [2, 1], None), [1, 2]), (([1, 2, 3], [4]), []), (([1, 2, 3],), [1, 2, 3]), (([], [101, 2, 1, 10], [2, 1]), []), (([],), []), ( (["A", "b", "cC"], ["a", "cc"], ["A", "CC"], lambda a, b: a.lower() == b.lower()), ["A", "cC"], ), ], ) def test_intersection_with(case, expected): assert _.intersection_with(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4], 10), [1, 10, 2, 10, 3, 10, 4]), (([1, 2, 3, 4], [0, 0, 0]), [1, [0, 0, 0], 2, [0, 0, 0], 3, [0, 0, 0], 4]), ( ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [0, 0, 0]), [[1, 2, 3], [0, 0, 0], [4, 5, 6], [0, 0, 0], [7, 8, 9]], ), ], ) def test_intersperse(case, expected): assert _.intersperse(*case) == expected @parametrize("case,expected", [([1, 2, 3], 3), ([], None)]) def test_last(case, expected): assert _.last(case) == expected @parametrize( "case,value,from_index,expected", [ ([1, 2, 3, 1, 2, 3], 2, 0, -1), ([1, 2, 3, 1, 2, 3], 2, 3, 1), ([1, 2, 3, 1, 2, 3], 0, 0, -1), ([0, 1, 2, 3, 4, 5], 3, 0, -1), ([0, 1, 2, 3, 4, 5], 3, 1, -1), ([0, 1, 2, 3, 4, 5], 3, 2, -1), ([0, 1, 2, 3, 4, 5], 3, 3, 3), ([0, 1, 2, 3, 4, 5], 3, 4, 3), ([0, 1, 2, 3, 4, 5], 3, 5, 3), ([0, 1, 2, 3, 4, 5], 3, 6, 3), ([0, 1, 2, 3, 4, 5], 3, -1, 3), ([0, 1, 2, 3, 4, 5], 3, -2, 3), ([0, 1, 2, 3, 4, 5], 3, -3, 3), ([0, 1, 2, 3, 4, 5], 3, -4, -1), ([0, 1, 2, 3, 4, 5], 3, -5, -1), ([0, 1, 2, 3, 4, 5], 3, -6, -1), ([0, 1, 2, 3, 4, 5], 3, None, 3), ], ) def test_last_index_of(case, value, from_index, expected): assert _.last_index_of(case, value, from_index) == expected @parametrize( "case,expected", [(([1, 2, None, 4, None, 6], lambda x, i: [str(i)] if x is None else []), ["2", "4"])], ) def test_mapcat(case, expected): assert _.mapcat(*case) == expected @parametrize( "case,pos,expected", [([11, 22, 33], 2, 33), ([11, 22, 33], 0, 11), ([11, 22, 33], -1, 33), ([11, 22, 33], 4, None)], ) def test_nth(case, pos, expected): assert _.nth(case, pos) == expected @parametrize( "case,expected,after", [ (([1, 2, 3],), 3, [1, 2]), (([1, 2, 3], 0), 1, [2, 3]), (([1, 2, 3], 1), 2, [1, 3]), ], ) def test_pop(case, expected, after): array = case[0] assert _.pop(*case) == expected assert array == after 
@parametrize("case,values,expected", [([1, 2, 3, 1, 2, 3], [2, 3], [1, 1])]) def test_pull(case, values, expected): assert _.pull(case, *values) == expected @parametrize( "case,values,expected", [ ([1, 2, 3, 1, 2, 3], [2, 3], [1, 1]), ([1, 2, 3, 1, 2, 3], [1, 2, 3], []), ([1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3], []), ], ) def test_pull_all(case, values, expected): assert _.pull_all(case, values) == expected @parametrize( "case,values,iteratee,expected", [ ([1, 2, 3, 1, 2, 3], [2, 3], None, [1, 1]), ([1, 2, 3, 1, 2, 3], [2, 3], lambda item: item + 2, [1, 1]), ], ) def test_pull_all_by(case, values, iteratee, expected): assert _.pull_all_by(case, values, iteratee) == expected @parametrize( "case,values,iteratee,expected", [ ([1, 2, 3, 1, 2, 3], [2, 3], None, [1, 1]), ([1, 2, 3, 1, 2, 3], [2, 3], lambda a, b: a == b, [1, 1]), ([1, 2, 3, 1, 2, 3], [2, 3], lambda a, b: a != b, []), ], ) def test_pull_all_with(case, values, iteratee, expected): assert _.pull_all_with(case, values, iteratee) == expected @parametrize( "case,expected", [ (([1, 2, 3, 1, 2, 3], [2, 3]), [1, 2, 2, 3]), (([1, 2, 3, 1, 2, 3], [3, 2]), [1, 2, 2, 3]), (([1, 2, 3, 1, 2, 3], 3, 2), [1, 2, 2, 3]), ], ) def test_pull_at(case, expected): assert _.pull_at(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], 4), [1, 2, 3, 4]), (([1, 2, 3], 4, 5), [1, 2, 3, 4, 5]), (([1, 2, 3], [4, 5], 6, [7, 8]), [1, 2, 3, [4, 5], 6, [7, 8]]), ], ) def test_push(case, expected): assert _.push(*case) == expected @parametrize( "case,filter_by,expected", [ ([1, 2, 3, 4, 5, 6], lambda x: x % 2 == 0, [2, 4, 6]), ([1, 2, 3, 4], lambda x: x >= 3, [3, 4]), ], ) def test_remove(case, filter_by, expected): original = list(case) assert _.remove(case, filter_by) == expected assert set(case).intersection(expected) == set([]) assert set(original) == set(case + expected) @parametrize( "case,expected", [ ([1, 2, 3, 4], [4, 3, 2, 1]), ("abcdef", "fedcba"), ], ) def test_reverse(case, expected): assert _.reverse(case) == expected @parametrize( "case,expected,after", [ ([1, 2, 3], 1, [2, 3]), ], ) def test_shift(case, expected, after): assert _.shift(case) == expected assert case == after @parametrize( "case,expected", [ (([1, 2, 3, 4, 5], 0, 1), [1]), (([1, 2, 3, 4, 5], 1, 3), [2, 3]), (([1, 2, 3, 4, 5], 1, 4), [2, 3, 4]), (([1, 2, 3, 4, 5], 1, 5), [2, 3, 4, 5]), (([1, 2, 3, 4, 5], 0, -1), [1, 2, 3, 4]), (([1, 2, 3, 4, 5], 2), [3]), (([1, 2, 3, 4, 5], -1), [5]), (([1, 2, 3, 4, 5], -2), [4]), (([1, 2, 3, 4, 5], -3), [3]), (([1, 2, 3, 4, 5], -5), [1]), ], ) def test_slice_(case, expected): assert _.slice_(*case) == expected @parametrize( "case,expected", [ (([2, 1, 3, 4, 6, 5],), [1, 2, 3, 4, 5, 6]), (([2, 1, 3, 4, 6, 5], None, None, True), [6, 5, 4, 3, 2, 1]), (([{"v": 2}, {"v": 3}, {"v": 1}], None, lambda x: x["v"]), [{"v": 1}, {"v": 2}, {"v": 3}]), (([2, 1, 3, 4, 6, 5], lambda a, b: -1 if a > b else 1), [6, 5, 4, 3, 2, 1]), ], ) def test_sort(case, expected): array = case[0] assert _.sort(*case) == expected assert array == expected def test_sort_comparator_key_exception(): with pytest.raises(ValueError): _.sort([], comparator=lambda: None, key=lambda: None) @parametrize( "case,expected", [ (([4, 4, 5, 5, 6, 6], 5), 2), (([20, 30, 40, 40, 50], 40), 2), (([20, 30, 50], 40), 2), (([20, 30, 50], 10), 0), ], ) def test_sorted_index(case, expected): assert _.sorted_index(*case) == expected @parametrize( "case,expected", [ (([{"x": 20}, {"x": 30}, {"x": 50}], {"x": 40}, "x"), 2), ( ( ["twenty", "thirty", "fifty"], "forty", lambda x: {"twenty": 20, "thirty": 
30, "forty": 40, "fifty": 50}[x], ), 2, ), ], ) def test_sorted_index_by(case, expected): assert _.sorted_index_by(*case) == expected @parametrize( "array,value,expected", [ ([2, 3, 4, 10, 10], 10, 3), ([10, 10, 4, 2, 3], 11, -1), ], ) def test_sorted_index_of(array, value, expected): assert _.sorted_index_of(array, value) == expected @parametrize( "case,expected", [ (([4, 4, 5, 5, 6, 6], 5), 4), (([20, 30, 40, 40, 50], 40), 4), (([20, 30, 50], 10), 0), ], ) def test_sorted_last_index(case, expected): assert _.sorted_last_index(*case) == expected @parametrize( "case,expected", [ (([{"x": 20}, {"x": 30}, {"x": 50}], {"x": 40}, "x"), 2), ( ( ["twenty", "thirty", "fifty"], "forty", lambda x: {"twenty": 20, "thirty": 30, "forty": 40, "fifty": 50}[x], ), 2, ), ], ) def test_sorted_last_index_by(case, expected): assert _.sorted_last_index_by(*case) == expected @parametrize( "array,value,expected", [ ([2, 3, 4, 10, 10], 10, 4), ([10, 10, 4, 2, 3], 11, -1), ], ) def test_sorted_last_index_of(array, value, expected): assert _.sorted_last_index_of(array, value) == expected @parametrize( "case,expected", [([2, 2, 1, 0.5, 4], [0.5, 1, 2, 4]), ([4, -2, -2, 0.5, -1], [-2, -1, 0.5, 4])] ) def test_sorted_uniq(case, expected): assert _.sorted_uniq(case) == expected @parametrize( "case,iteratee,expected", [ ([2.5, 3, 1, 2, 1.5], lambda num: math.floor(num), [1, 2.5, 3]), (["A", "b", "C", "a", "B", "c"], lambda letter: letter.lower(), ["A", "C", "b"]), ], ) def test_sorted_uniq_by(case, iteratee, expected): assert _.sorted_uniq_by(case, iteratee) == expected @parametrize( "case,expected,after", [ (([1, 2, 3], 1, 0, "splice"), [], [1, "splice", 2, 3]), (([1, 2, 3], 1, 1, "splice"), [2], [1, "splice", 3]), (([1, 2, 3], 0, 2, "splice", "slice", "dice"), [1, 2], ["splice", "slice", "dice", 3]), (([1, 2, 3], 0), [1, 2, 3], []), (([1, 2, 3], 1), [2, 3], [1]), ], ) def test_splice(case, expected, after): array = case[0] assert _.splice(*case) == expected assert array == after @parametrize( "case,expected", [ (("123", 1, 0, "splice"), "1splice23"), ], ) def test_splice_string(case, expected): assert _.splice(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5], 2), [[1, 2], [3, 4, 5]]), (([1, 2, 3, 4, 5], 0), [[], [1, 2, 3, 4, 5]]), ], ) def test_split_at(case, expected): assert _.split_at(*case) == expected @parametrize("case,expected", [([1, 2, 3], [2, 3]), ([], [])]) def test_tail(case, expected): assert _.tail(case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5],), [1]), (([1, 2, 3, 4, 5], 1), [1]), (([1, 2, 3, 4, 5], 2), [1, 2]), (([1, 2, 3, 4, 5], 5), [1, 2, 3, 4, 5]), (([1, 2, 3, 4, 5], 6), [1, 2, 3, 4, 5]), ], ) def test_take(case, expected): assert _.take(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5], lambda item: item < 3), [1, 2]), ], ) def test_take_while(case, expected): assert _.take_while(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5],), [5]), (([1, 2, 3, 4, 5], 1), [5]), (([1, 2, 3, 4, 5], 2), [4, 5]), (([1, 2, 3, 4, 5], 5), [1, 2, 3, 4, 5]), (([1, 2, 3, 4, 5], 6), [1, 2, 3, 4, 5]), ], ) def test_take_right(case, expected): assert _.take_right(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5], lambda item: item > 3), [4, 5]), ], ) def test_take_right_while(case, expected): assert _.take_right_while(*case) == expected @parametrize( "case,expected", [ ([1, 2, 1, 3, 1], [1, 2, 3]), ([dict(a=1), dict(a=2), dict(a=1)], [dict(a=1), dict(a=2)]), ], ) def test_uniq(case, expected): assert _.uniq(case) == 
expected @parametrize( "case,iteratee,expected", [ ([1, 2, 1.5, 3, 2.5], lambda num: math.floor(num), [1, 2, 3]), ( [ {"name": "banana", "type": "fruit"}, {"name": "apple", "type": "fruit"}, {"name": "beet", "type": "vegetable"}, {"name": "beet", "type": "vegetable"}, {"name": "carrot", "type": "vegetable"}, {"name": "carrot", "type": "vegetable"}, ], {"type": "vegetable"}, [{"name": "banana", "type": "fruit"}, {"name": "beet", "type": "vegetable"}], ), ( [{"x": 1, "y": 1}, {"x": 2, "y": 1}, {"x": 1, "y": 1}], "x", [{"x": 1, "y": 1}, {"x": 2, "y": 1}], ), (["A", "b", "C", "a", "B", "c"], lambda letter: letter.lower(), ["A", "b", "C"]), ], ) def test_uniq_by(case, iteratee, expected): assert _.uniq_by(case, iteratee) == expected @parametrize( "case,iteratee,expected", [ ([1, 2, 3, 4, 5], lambda a, b: (a % 2) == (b % 2), [1, 2]), ([5, 4, 3, 2, 1], lambda a, b: (a % 2) == (b % 2), [5, 4]), ], ) def test_uniq_with(case, iteratee, expected): assert _.uniq_with(case, iteratee) == expected @parametrize( "case,expected", [(([1, 2, 3], [101, 2, 1, 10], [2, 1]), [1, 2, 3, 101, 10]), (([11, 22, 33],), [11, 22, 33])], ) def test_union(case, expected): assert _.union(*case) == expected @parametrize( "case,iteratee,expected", [ (([1, 2, 3], [2, 3, 4]), lambda x: x % 10, [1, 2, 3, 4]), (([1, 2, 3], [2, 3, 4]), lambda x: x % 2, [1, 2]), (([1, 2, 3], [2, 3, 4], lambda x: x % 2), None, [1, 2]), (([11, 22, 33],), None, [11, 22, 33]), ], ) def test_union_by(case, iteratee, expected): assert _.union_by(*case, iteratee=iteratee) == expected @parametrize( "case,expected", [ (([11, 22, 33], [22, 33, 44]), [11, 22, 33, 44]), (([11, 22, 33],), [11, 22, 33]), (([1, 2, 3], [2, 3, 4], lambda a, b: (a % 2) == (b % 2)), [1, 2]), ], ) def test_union_with(case, expected): assert _.union_with(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], 4), [4, 1, 2, 3]), (([1, 2, 3], 4, 5), [4, 5, 1, 2, 3]), (([1, 2, 3], [4, 5], 6, [7, 8]), [[4, 5], 6, [7, 8], 1, 2, 3]), ], ) def test_unshift(case, expected): assert _.unshift(*case) == expected assert case[0] == expected @parametrize( "case,expected", [ ( [("moe", 30, True), ("larry", 40, False), ("curly", 35, True)], [("moe", "larry", "curly"), (30, 40, 35), (True, False, True)], ) ], ) def test_unzip(case, expected): assert _.unzip(case) == expected @parametrize( "case,expected", [ (([],), []), (([[1, 10, 100], [2, 20, 200]],), [(1, 2), (10, 20), (100, 200)]), (([[2, 4, 6], [2, 2, 2]], _.power), [4, 16, 36]), ], ) def test_unzip_with(case, expected): assert _.unzip_with(*case) == expected @parametrize("case,expected", [(([1, 2, 1, 0, 3, 1, 4], 0, 1), [2, 3, 4])]) def test_without(case, expected): assert _.without(*case) == expected @parametrize( "case,expected", [(([1, 2, 3], [5, 2, 1, 4]), [3, 5, 4]), (([1, 2, 5], [2, 3, 5], [3, 4, 5]), [1, 4, 5])], ) def test_xor(case, expected): assert _.xor(*case) == expected @parametrize("case,expected", [(([1, 2, 3], [5, 4], lambda val: val % 3), [3])]) def test_xor_by(case, expected): assert _.xor_by(*case) == expected @parametrize("case,expected", [(([1, 2, 3], [5, 4], lambda a, b: a <= b), [5, 4])]) def test_xor_with(case, expected): assert _.xor_with(*case) == expected @parametrize( "case,expected", [ ( (["moe", "larry", "curly"], [30, 40, 35], [True, False, True]), [("moe", 30, True), ("larry", 40, False), ("curly", 35, True)], ) ], ) def test_zip_(case, expected): assert _.zip_(*case) == expected @parametrize( "case,expected", [ ((["moe", "larry"], [30, 40]), {"moe": 30, "larry": 40}), (([["moe", 30], ["larry", 40]],), 
{"moe": 30, "larry": 40}), ], ) def test_zip_object(case, expected): assert _.zip_object(*case) == expected @parametrize( "case,expected", [ ((["a.b.c", "a.b.d"], [1, 2]), {"a": {"b": {"c": 1, "d": 2}}}), ((["a.b[0].c", "a.b[1].d"], [1, 2]), {"a": {"b": [{"c": 1}, {"d": 2}]}}), ], ) def test_zip_object_deep(case, expected): assert _.zip_object_deep(*case) == expected @parametrize( "case,expected", [ (([1, 2],), [(1,), (2,)]), (([1, 2], [3, 4], _.add), [4, 6]), ], ) def test_zip_with(case, expected): assert _.zip_with(*case) == expected pydash-8.0.3/tests/test_chaining.py000066400000000000000000000077241464745015500173720ustar00rootroot00000000000000from copy import deepcopy import pytest import pydash as _ parametrize = pytest.mark.parametrize pydash_methods = _.filter_(dir(_), lambda m: callable(getattr(_, m, None))) def test_chaining_methods(): chain = _.chain([]) for method in dir(_): if not callable(method): continue chained = getattr(chain, method) assert chained.method is getattr(_, method) @parametrize( "value,methods", [([1, 2, 3, 4], [("without", (2, 3)), ("reject", (lambda x: x > 1,))])] ) def test_chaining(value, methods): expected = deepcopy(value) actual = _.chain(deepcopy(value)) for method, args in methods: expected = getattr(_, method)(expected, *args) actual = getattr(actual, method)(*args) assert actual.value() == expected def test_chaining_invalid_method(): raised = False try: _.chain([]).foobar # noqa: B018 except _.InvalidMethod: raised = True assert raised def test_invalid_method_subclasses_attribute_error(): # NOTE: This needs to subclass AttributeError due to compatibility with typing.Protocol and # runtime_checkable. See https://github.com/dgilland/pydash/issues/165 assert issubclass(_.InvalidMethod, AttributeError) def test_chaining_lazy(): tracker = {"called": False} def interceptor(value): tracker["called"] = True return value.pop() chain = _.chain([1, 2, 3, 4, 5]).initial().tap(interceptor) assert not tracker["called"] chain = chain.last() assert not tracker["called"] result = chain.value() assert tracker["called"] assert result == 3 def test_chaining_late_value(): square_sum = _.chain().power(2).sum() assert square_sum([1, 2, 3, 4]) == 30 def test_chaining_late_value_reuse(): square_sum = _.chain().power(2).sum() assert square_sum([1, 2, 3, 4]) == 30 assert square_sum([2]) == 4 def test_chaining_late_value_override(): square_sum = _.chain([1, 2, 3, 4]).power(2).sum() assert square_sum([5, 6, 7, 8]) == 174 def test_chaining_plant(): value = [1, 2, 3, 4] square_sum1 = _.chain(value).power(2).sum() def root_value(wrapper): if isinstance(wrapper._value, _.chaining.chaining.ChainWrapper): return root_value(wrapper._value) return wrapper._value assert root_value(square_sum1._value) == value test_value = [5, 6, 7, 8] square_sum2 = square_sum1.plant(test_value) assert root_value(square_sum1._value) == value assert root_value(square_sum2._value) == test_value assert square_sum1.value() == 30 assert square_sum2.value() == 174 def test_chaining_commit(): chain = _.chain([1, 2, 3, 4]).power(2).sum() committed = chain.commit() assert chain is not committed assert chain.value() == committed.value() def test_dash_instance_chaining(): value = [1, 2, 3, 4] from__ = _._(value).without(2, 3).reject(lambda x: x > 1) from_chain = _.chain(value).without(2, 3).reject(lambda x: x > 1) assert from__.value() == from_chain.value() def test_dash_instance_methods(): assert pydash_methods for method in pydash_methods: assert getattr(_._, method) is getattr(_, method) def 
test_dash_suffixed_method_aliases(): methods = _.filter_(pydash_methods, lambda m: m.endswith("_")) assert methods for method in methods: assert getattr(_._, method[:-1]) is getattr(_, method) def test_dash_method_call(): value = [1, 2, 3, 4, 5] assert _._.initial(value) == _.initial(value) def test_dash_alias(): assert _.py_ is _._ @parametrize( "case,expected", [ ([1, 2, 3], "[1, 2, 3]"), ], ) def test_chaining_value_to_string(case, expected): assert _.chain(case).to_string() == expected @parametrize("value,interceptor,expected", [([1, 2, 3, 4, 5], lambda value: value.pop(), 3)]) def test_tap(value, interceptor, expected): actual = _.chain(value).initial().tap(interceptor).last().value() assert actual == expected pydash-8.0.3/tests/test_collections.py000066400000000000000000000726501464745015500201300ustar00rootroot00000000000000from collections import namedtuple import math from operator import attrgetter, itemgetter, methodcaller import pytest import pydash as _ from . import helpers parametrize = pytest.mark.parametrize @parametrize( "case,expected", [ ((["a", "b", "c", "d", "e"], [0], [2], [4]), ["a", "c", "e"]), ((["moe", "larry", "curly"], 0, 2), ["moe", "curly"]), (({"a": 1, "b": 2, "c": 3}, "a", "b"), [1, 2]), ], ) def test_at(case, expected): assert _.at(*case) == expected @parametrize( "case,expected", [ (([4.3, 6.1, 6.4], lambda num: int(math.floor(num))), {4: 1, 6: 2}), (([{"one": 1}, {"one": 1}, {"two": 2}, {"one": 1}], {"one": 1}), {True: 3, False: 1}), (([{"one": 1}, {"one": 1}, {"two": 2}, {"one": 1}], "one"), {1: 3, None: 1}), (({1: 0, 2: 0, 4: 3},), {0: 2, 3: 1}), ], ) def test_count_by(case, expected): assert _.count_by(*case) == expected @parametrize( "case,expected", [ (([True, 1, None, "yes"], bool), False), (([True, 1, None, "yes"],), False), (([{"name": "moe", "age": 40}, {"name": "larry", "age": 50}], "age"), True), (([{"name": "moe", "age": 40}, {"name": "larry", "age": 50}], {"age": 50}), False), ], ) def test_every(case, expected): assert _.every(*case) == expected @parametrize( "case,expected", [ (([0, True, False, None, 1, 2, 3],), [True, 1, 2, 3]), (([1, 2, 3, 4, 5, 6], lambda num: num % 2 == 0), [2, 4, 6]), ( ( [ {"name": "barney", "age": 36, "blocked": False}, {"name": "fred", "age": 40, "blocked": True}, ], "blocked", ), [{"name": "fred", "age": 40, "blocked": True}], ), ( ( [ {"name": "barney", "age": 36, "blocked": False}, {"name": "fred", "age": 40, "blocked": True}, ], {"age": 36}, ), [{"name": "barney", "age": 36, "blocked": False}], ), ( ([{"name": "moe", "age": 40}, {"name": "larry", "age": 50}], {"age": 40}), [{"name": "moe", "age": 40}], ), ], ) def test_filter_(case, expected): assert _.filter_(*case) == expected @parametrize( "case,expected", [ ( ( [ {"name": "barney", "age": 36, "blocked": False}, {"name": "fred", "age": 40, "blocked": True}, {"name": "pebbles", "age": 1, "blocked": False}, ], lambda c: c["age"] < 40, ), {"name": "barney", "age": 36, "blocked": False}, ), ( ( [ {"name": "barney", "age": 36, "blocked": False}, {"name": "fred", "age": 40, "blocked": True}, {"name": "pebbles", "age": 1, "blocked": False}, ], {"age": 1}, ), {"name": "pebbles", "age": 1, "blocked": False}, ), ( ( [ {"name": "barney", "age": 36, "blocked": False}, {"name": "fred", "age": 40, "blocked": True}, {"name": "pebbles", "age": 1, "blocked": False}, ], "blocked", ), {"name": "fred", "age": 40, "blocked": True}, ), ( ( [ {"name": "barney", "age": 36, "blocked": False}, {"name": "fred", "age": 40, "blocked": True}, {"name": "pebbles", "age": 1, "blocked": 
False}, ], ), {"name": "barney", "age": 36, "blocked": False}, ), (({"abc": 1, "xyz": 2, "c": 3}.keys(), lambda x: "y" in x), "xyz"), (({"abc": 1, "xyz": 2, "c": 3}.values(), lambda x: x < 2), 1), ], ) def test_find(case, expected): assert _.find(*case) == expected def test_find_class_object(): obj = helpers.Object(a=1, b=2) assert _.find([None, {}, obj], {"b": 2}) == obj def test_find_namedtuple(): User = namedtuple("User", ["first_name", "last_name"]) obj = User(first_name="Bob", last_name="Smith") assert _.find([None, {}, obj], {"first_name": "Bob"}) == obj @parametrize( "case,expected", [(({"abc": 1, "xyz": 2, "c": 3}.values(), helpers.Filter(lambda x: x < 2)), 1)], ) def test_find_using_callable_class(case, expected): assert _.find(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4], lambda num: num % 2 == 1), 3), (({"abc": 1, "xyz": 2, "c": 3}.keys(), lambda x: "y" in x), "xyz"), (({"abc": 1, "xyz": 2, "c": 3}.values(), lambda x: x < 2), 1), ], ) def test_find_last(case, expected): assert _.find_last(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], None), [1, 2, 3]), (([[1], [2], [3]], None), [1, 2, 3]), (([[[1]], [[2]], [[3]]], None), [[1], [2], [3]]), (([1, 2, 3], lambda x: [x - 1]), [0, 1, 2]), (([1, 2, 3], lambda x: [[x], [x]]), [[1], [1], [2], [2], [3], [3]]), ], ) def test_flat_map(case, expected): assert _.flat_map(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], None), [1, 2, 3]), (([[1], [2], [3]], None), [1, 2, 3]), (([[[1]], [[2]], [[3]]], None), [1, 2, 3]), (([1, 2, 3], lambda x: [x - 1]), [0, 1, 2]), (([1, 2, 3], lambda x: [[x], [x]]), [1, 1, 2, 2, 3, 3]), ], ) def test_flat_map_deep(case, expected): assert _.flat_map_deep(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], None), [1, 2, 3]), (([[1], [2], [3]], None), [1, 2, 3]), (([[[1]], [[2]], [[3]]], None), [[1], [2], [3]]), (([[[1]], [[2]], [[3]]], None, 1), [[1], [2], [3]]), (([[[1]], [[2]], [[3]]], None, 2), [1, 2, 3]), ], ) def test_flat_map_depth(case, expected): assert _.flat_map_depth(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], helpers.noop), [1, 2, 3]), (([1, 2, 3], lambda value: value < 2), [1, 2, 3]), (({"one": 1, "two": 2, "three": 3}, helpers.noop), {"one": 1, "two": 2, "three": 3}), ], ) def test_for_each(case, expected): assert _.for_each(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], helpers.noop), [1, 2, 3]), (([1, 2, 3], lambda value: value < 2), [1, 2, 3]), (({"one": 1, "two": 2, "three": 3}, helpers.noop), {"one": 1, "two": 2, "three": 3}), ], ) def test_for_each_right(case, expected): assert _.for_each_right(*case) == expected @parametrize( "case,expected", [ (([4.2, 6.1, 6.4], lambda num: int(math.floor(num))), {4: [4.2], 6: [6.1, 6.4]}), ], ) def test_group_by(case, expected): assert _.group_by(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], 1), True), (([1, 2, 3], 1, 2), False), (({"name": "fred", "age": 40}, "fred"), True), (("pebbles", "eb"), True), ], ) def test_includes(case, expected): assert _.includes(*case) == expected @parametrize( "case,expected", [ (([{"a": {"b": 2}}, {"a": {"c": 3}}], "a.items"), [{"b": 2}.items(), {"c": 3}.items()]), ( ([{"a": {"b": {"c": 2}}}, {"a": {"b": {"c": 3}}}], "a.b.items"), [{"c": 2}.items(), {"c": 3}.items()], ), ], ) def test_invoke_map(case, expected): assert _.invoke_map(*case) == expected @parametrize( "case,expected", [ ( ([{"dir": "left", "code": 97}, {"dir": "right", "code": 100}], "dir"), {"left": {"dir": "left", "code": 97}, 
"right": {"dir": "right", "code": 100}}, ), ], ) def test_key_by(case, expected): assert _.key_by(*case) == expected @parametrize( "case,expected,sort_results", [ (([1, 2, 3],), [1, 2, 3], False), (([1.1, 2.1, 3.1], int), [1, 2, 3], False), (([1, 2, 3], lambda num: num * 3), [3, 6, 9], False), (([[1], [2, 3], [4, 5, 6]], len), [1, 2, 3], False), (({"one": 1, "two": 2, "three": 3}, lambda num: num * 3), [3, 6, 9], True), ( ([{"name": "moe", "age": 40}, {"name": "larry", "age": 50}], "name"), ["moe", "larry"], False, ), ( ( [ {"level1": {"level2": {"level3": {"value": 1}}}}, {"level1": {"level2": {"level3": {"value": 2}}}}, {"level1": {"level2": {"level3": {"value": 3}}}}, {"level1": {"level2": {"level3": {"value": 4}}}}, {"level1": {"level2": {}}}, {}, ], "level1.level2.level3.value", ), [1, 2, 3, 4, None, None], False, ), (([[0, 1], [2, 3], [4, 5]], 1), [1, 3, 5], False), ( ( [{"a": 1, "b": 2, "c": -1}, {"a": 3, "b": 4, "c": -1}, {"a": 5, "b": 6, "c": -1}], itemgetter("a", "b"), ), [(1, 2), (3, 4), (5, 6)], False, ), ( ( [helpers.Object(a=1, b=2, c=-1), helpers.Object(a=3, b=4, c=-1)], attrgetter("a", "b"), ), [(1, 2), (3, 4)], False, ), ( ( [{"a": 1, "b": 2, "c": -1}, {"a": 3, "b": 4}, {"a": 5}], methodcaller("__len__"), ), [3, 2, 1], False, ), ], ) def test_map_(case, expected, sort_results): actual = _.map_(*case) if sort_results: actual = sorted(actual) assert actual == expected @parametrize( "case,expected", [ ( ( [ {"a": 1, "b": 2, "c": 3}, {"a": 1, "b": 2, "c": 4}, {"a": 1, "b": 2, "c": 5}, {"a": 1, "b": 1, "c": 6}, {"a": 1, "b": 1, "c": 7}, {"a": 2, "b": 2, "c": 8}, {"a": 2, "b": 2, "c": 9}, {"a": 2, "b": 2, "c": 10}, {"a": 3, "b": 1, "c": 11}, ], "a", ), { 1: [ {"a": 1, "b": 2, "c": 3}, {"a": 1, "b": 2, "c": 4}, {"a": 1, "b": 2, "c": 5}, {"a": 1, "b": 1, "c": 6}, {"a": 1, "b": 1, "c": 7}, ], 2: [{"a": 2, "b": 2, "c": 8}, {"a": 2, "b": 2, "c": 9}, {"a": 2, "b": 2, "c": 10}], 3: [{"a": 3, "b": 1, "c": 11}], }, ), ( ( [ {"a": 1, "b": 2, "c": 3}, {"a": 1, "b": 2, "c": 4}, {"a": 1, "b": 2, "c": 5}, {"a": 1, "b": 1, "c": 6}, {"a": 1, "b": 1, "c": 7}, {"a": 2, "b": 2, "c": 8}, {"a": 2, "b": 2, "c": 9}, {"a": 2, "b": 2, "c": 10}, {"a": 3, "b": 1, "c": 11}, ], "a", "b", ), { 1: { 2: [ {"a": 1, "b": 2, "c": 3}, {"a": 1, "b": 2, "c": 4}, {"a": 1, "b": 2, "c": 5}, ], 1: [{"a": 1, "b": 1, "c": 6}, {"a": 1, "b": 1, "c": 7}], }, 2: { 2: [ {"a": 2, "b": 2, "c": 8}, {"a": 2, "b": 2, "c": 9}, {"a": 2, "b": 2, "c": 10}, ] }, 3: {1: [{"a": 3, "b": 1, "c": 11}]}, }, ), ( ( [ {"a": 1, "b": 2, "c": 3}, {"a": 1, "b": 2, "c": 4}, {"a": 1, "b": 2, "c": 5}, {"a": 1, "b": 1, "c": 6}, {"a": 1, "b": 1, "c": 7}, {"a": 2, "b": 2, "c": 8}, {"a": 2, "b": 2, "c": 9}, {"a": 2, "b": 2, "c": 10}, {"a": 3, "b": 1, "c": 11}, ], ), [ {"a": 1, "b": 2, "c": 3}, {"a": 1, "b": 2, "c": 4}, {"a": 1, "b": 2, "c": 5}, {"a": 1, "b": 1, "c": 6}, {"a": 1, "b": 1, "c": 7}, {"a": 2, "b": 2, "c": 8}, {"a": 2, "b": 2, "c": 9}, {"a": 2, "b": 2, "c": 10}, {"a": 3, "b": 1, "c": 11}, ], ), ], ) def test_nest(case, expected): assert _.nest(*case) == expected @parametrize( "case,expected", [ ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], [], ), [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ["user", "age"], ), [ {"user": "barney", "age": 26}, {"user": 
"barney", "age": 36}, {"user": "fred", "age": 30}, {"user": "fred", "age": 40}, ], ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ["-user", "age"], ), [ {"user": "fred", "age": 30}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "barney", "age": 36}, ], ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ["user", "-age"], ), [ {"user": "barney", "age": 36}, {"user": "barney", "age": 26}, {"user": "fred", "age": 40}, {"user": "fred", "age": 30}, ], ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ["-user", "-age"], ), [ {"user": "fred", "age": 40}, {"user": "fred", "age": 30}, {"user": "barney", "age": 36}, {"user": "barney", "age": 26}, ], ), ( ( { 1: {"user": "barney", "age": 36}, 2: {"user": "fred", "age": 40}, 3: {"user": "barney", "age": 26}, 4: {"user": "fred", "age": 30}, }, ["user", "age"], ), [ {"user": "barney", "age": 26}, {"user": "barney", "age": 36}, {"user": "fred", "age": 30}, {"user": "fred", "age": 40}, ], ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], [], True, ), [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ["user", "age"], True, ), list( reversed( [ {"user": "barney", "age": 26}, {"user": "barney", "age": 36}, {"user": "fred", "age": 30}, {"user": "fred", "age": 40}, ] ) ), ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ["-user", "age"], True, ), list( reversed( [ {"user": "fred", "age": 30}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "barney", "age": 36}, ] ) ), ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ["user", "-age"], True, ), list( reversed( [ {"user": "barney", "age": 36}, {"user": "barney", "age": 26}, {"user": "fred", "age": 40}, {"user": "fred", "age": 30}, ] ) ), ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ["-user", "-age"], True, ), list( reversed( [ {"user": "fred", "age": 40}, {"user": "fred", "age": 30}, {"user": "barney", "age": 36}, {"user": "barney", "age": 26}, ] ) ), ), ( ( { 1: {"user": "barney", "age": 36}, 2: {"user": "fred", "age": 40}, 3: {"user": "barney", "age": 26}, 4: {"user": "fred", "age": 30}, }, ["user", "age"], True, ), list( reversed( [ {"user": "barney", "age": 26}, {"user": "barney", "age": 36}, {"user": "fred", "age": 30}, {"user": "fred", "age": 40}, ] ) ), ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ["user", "age"], [False, True], True, ), list( reversed( [ {"user": "fred", "age": 30}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "barney", "age": 36}, ] ) ), ), ( ( [ {"user": "barney", "age": 36}, {"user": "fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "fred", "age": 30}, ], ["user", "age"], [False], True, ), list( reversed( [ {"user": "fred", "age": 30}, {"user": 
"fred", "age": 40}, {"user": "barney", "age": 26}, {"user": "barney", "age": 36}, ] ) ), ), ], ) def test_order_by(case, expected): assert _.order_by(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], lambda item: item % 2), [[1, 3], [2]]), (([1.2, 2.3, 3.4], lambda item: math.floor(item) % 2), [[1.2, 3.4], [2.3]]), ( ( [ {"name": "barney", "age": 36}, {"name": "fred", "age": 40, "blocked": True}, {"name": "pebbles", "age": 1}, ], {"age": 1}, ), [ [{"name": "pebbles", "age": 1}], [{"name": "barney", "age": 36}, {"name": "fred", "age": 40, "blocked": True}], ], ), ( ( [ {"name": "barney", "age": 36}, {"name": "fred", "age": 40, "blocked": True}, {"name": "pebbles", "age": 1}, ], "blocked", ), [ [{"name": "fred", "age": 40, "blocked": True}], [{"name": "barney", "age": 36}, {"name": "pebbles", "age": 1}], ], ), ], ) def test_partition(case, expected): assert _.partition(*case) == expected @parametrize( "case,expected", [ (([{"name": "moe", "age": 40}, {"name": "larry", "age": 50}], "name"), ["moe", "larry"]), ( ( [ {"level1": {"level2": {"level3": {"value": 1}}}}, {"level1": {"level2": {"level3": {"value": 2}}}}, {"level1": {"level2": {"level3": {"value": 3}}}}, {"level1": {"level2": {"level3": {"value": 4}}}}, {"level1": {"level2": {}}}, {}, ], "level1.level2.level3.value", ), [1, 2, 3, 4, None, None], ), ], ) def test_pluck(case, expected): assert _.pluck(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], None), 1), (([1, 2, 3], helpers.reduce_iteratee0), 6), (({"a": 1, "b": 2, "c": 3}, helpers.reduce_iteratee1, {}), {"a": 3, "b": 6, "c": 9}), ], ) def test_reduce_(case, expected): assert _.reduce_(*case) == expected @parametrize("case,exception", [(([],), TypeError)]) def test_reduce_raise(case, exception): raised = False try: _.reduce_(*case) except exception: raised = True assert raised @parametrize( "case,expected", [ (([1, 2, 3], None), 3), (([1, 2, 3], helpers.reduce_iteratee0), 6), (([[0, 1], [2, 3], [4, 5]], helpers.reduce_right_iteratee0), [4, 5, 2, 3, 0, 1]), (({"a": 1, "b": 2, "c": 3}, helpers.reduce_iteratee1, {}), {"a": 3, "b": 6, "c": 9}), ], ) def test_reduce_right(case, expected): assert _.reduce_right(*case) == expected @parametrize("case,exception", [(([],), TypeError)]) def test_reduce_right_exception(case, exception): raised = False try: _.reduce_right(*case) except exception: raised = True assert raised @parametrize( "case,expected", [ (([1, 2, 3], None), [1, 1]), (([1, 2, 3], helpers.reduce_iteratee0), [3, 6]), (([1, 2, 3, 4, 5], helpers.reduce_iteratee0, 0), [1, 3, 6, 10, 15]), ], ) def test_reductions(case, expected): assert _.reductions(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], None), [3, 3]), (([1, 2, 3], helpers.reduce_iteratee0), [5, 6]), ( ([[0, 1], [2, 3], [4, 5]], helpers.reduce_right_iteratee0), [[4, 5, 2, 3], [4, 5, 2, 3, 0, 1]], ), ], ) def test_reductions_right(case, expected): assert _.reductions_right(*case) == expected @parametrize( "case,expected", [ (([0, True, False, None, 1, 2, 3],), [0, False, None]), (([1, 2, 3, 4, 5, 6], lambda num: num % 2 == 0), [1, 3, 5]), ( ( [ {"name": "barney", "age": 36, "blocked": False}, {"name": "fred", "age": 40, "blocked": True}, ], "blocked", ), [{"name": "barney", "age": 36, "blocked": False}], ), ( ( [ {"name": "barney", "age": 36, "blocked": False}, {"name": "fred", "age": 40, "blocked": True}, ], {"age": 36}, ), [{"name": "fred", "age": 40, "blocked": True}], ), ], ) def test_reject(case, expected): assert _.reject(*case) == expected @parametrize( "case", [ [1, 
2, 3, 4, 5, 6], ], ) def test_sample(case): assert _.sample(case) in case @parametrize( "case", [ ([1, 2, 3, 4, 5, 6], 2), ([1, 2, 3, 4, 5, 6], 3), ([1, 2, 3, 4, 5, 6], 4), ], ) def test_sample_size(case): collection, n = case sample_n = _.sample_size(*case) assert isinstance(sample_n, list) assert len(sample_n) == min(n, len(collection)) assert set(sample_n).issubset(collection) @parametrize("case", [[1, 2, 3, 4, 5, 6], {"one": 1, "two": 2, "three": 3}]) def test_shuffle(case): shuffled = _.shuffle(case) assert len(shuffled) == len(case) if isinstance(case, dict): assert set(shuffled) == set(case.values()) else: assert set(shuffled) == set(case) @parametrize("case", [[1, 2, 3, 4, 5], {"1": 1, "2": 2, "3": 3}]) def test_size(case): assert _.size(case) == len(case) @parametrize( "case,expected", [ (([None, 0, "yes", False], bool), True), (([None, 0, "yes", False],), True), ( ( [ {"name": "apple", "organic": False, "type": "fruit"}, {"name": "carrot", "organic": True, "type": "vegetable"}, ], "organic", ), True, ), ( ( [ {"name": "apple", "organic": False, "type": "fruit"}, {"name": "carrot", "organic": True, "type": "vegetable"}, ], {"type": "meat"}, ), False, ), ], ) def test_some(case, expected): assert _.some(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3], lambda x: math.sin(x)), [3, 1, 2]), ( ( [ {"name": "barney", "age": 36}, {"name": "fred", "age": 40}, {"name": "barney", "age": 26}, {"name": "fred", "age": 30}, ], "age", ), [ {"name": "barney", "age": 26}, {"name": "fred", "age": 30}, {"name": "barney", "age": 36}, {"name": "fred", "age": 40}, ], ), (({"a": 1, "b": 2, "c": 3}, lambda x: math.sin(x)), [3, 1, 2]), (([1, 2, 3], lambda x: math.sin(x), True), [2, 1, 3]), ( ( [ {"name": "barney", "age": 36}, {"name": "fred", "age": 40}, {"name": "barney", "age": 26}, {"name": "fred", "age": 30}, ], "age", True, ), [ {"name": "fred", "age": 40}, {"name": "barney", "age": 36}, {"name": "fred", "age": 30}, {"name": "barney", "age": 26}, ], ), (({"a": 1, "b": 2, "c": 3}, lambda x: math.sin(x), True), [2, 1, 3]), ], ) def test_sort_by(case, expected): assert _.sort_by(*case) == expected @parametrize( "case,expected", [ (("cat",), ["c", "a", "t"]), ((b"cat",), ["c", "a", "t"]), (("cat",), ["c", "a", "t"]), (("cat", False), ["cat"]), ((b"cat", False), [b"cat"]), (("cat", False), ["cat"]), (({"a": 1, "b": 2, "c": 3},), [1, 2, 3]), ], ) def test_to_list(case, expected): assert set(_.to_list(*case)) == set(expected) pydash-8.0.3/tests/test_functions.py000066400000000000000000000251551464745015500176200ustar00rootroot00000000000000import time from unittest import mock import pytest import pydash as _ parametrize = pytest.mark.parametrize @parametrize( "case,expected", [ ((lambda: 3, 2), 3), ((lambda: 3, -1), 3), ], ) def test_after(case, expected): done = _.after(*case) for _x in range(case[1] - 1): ret = done() assert ret is None ret = done() assert ret == expected @parametrize( "case,args,kwargs,expected", [ ((lambda a=0, b=0, c=0, d=0: a + b + c + d, 1), (1, 2, 3, 4), {}, 1), ((lambda a=0, b=0, c=0, d=0: a + b + c + d, 2), (1, 2, 3, 4), {}, 3), ((lambda a=0, b=0, c=0, d=0: a + b + c + d, 3), (1, 2, 3, 4), {}, 6), ((lambda a=0, b=0, c=0, d=0: a + b + c + d, 1), (1, 2, 3, 4), {"d": 10}, 11), ((lambda a=0, b=0, c=0, d=0: a + b + c + d, 2), (1, 2, 3, 4), {"d": 10}, 13), ((lambda a=0, b=0, c=0, d=0: a + b + c + d, 3), (1, 2, 3, 4), {"d": 10}, 16), ((lambda a=0, b=0, c=0, d=0: a + b + c + d, None), (1, 2, 3, 4), {}, 10), ], ) def test_ary(case, args, kwargs, expected): assert 
_.ary(*case)(*args, **kwargs) == expected @parametrize( "case,expected", [ ((lambda: 3, 2), 3), ((lambda: 3, -1), 3), ], ) def test_before(case, expected): done = _.before(*case) for _x in range(case[1] - 1): ret = done() assert ret == expected ret = done() assert ret is None @parametrize( "case,arg,expected", [ ((_.is_boolean, _.is_empty), [False, True], True), ((_.is_boolean, _.is_empty), [False, None], False), ], ) def test_conjoin(case, arg, expected): assert _.conjoin(*case)(arg) == expected @parametrize( "case,arglist,expected", [ ((lambda a, b, c: [a, b, c],), [(1, 2, 3)], [1, 2, 3]), ((lambda a, b, c: [a, b, c],), [(1, 2), (3,)], [1, 2, 3]), ((lambda a, b, c: [a, b, c],), [(1,), (2,), (3,)], [1, 2, 3]), ((lambda *a: sum(a), 3), [(1, 1, 1)], 3), ((lambda *a: sum(a), 3), [(1,), (1,), (1,)], 3), ], ) def test_curry(case, arglist, expected): curried = _.curry(*case) # Run test twice to verify curried can be reused for _x in range(2): ret = curried for args in arglist: ret = ret(*args) assert ret == expected def test_curry_arity_max_from_func(): def func(data, accum, id): accum[id] = _.reduce_(data, lambda total, n: total + n) return accum ids = [1] data = [1, 2] curried_func_with_data = _.curry(func)(data) result = _.reduce_(ids, curried_func_with_data, {}) assert result == {1: 3} @parametrize( "case,arglist,expected", [ ((lambda a, b, c: [a, b, c],), [(1, 2, 3)], [1, 2, 3]), ((lambda a, b, c: [a, b, c],), [(2, 3), (1,)], [1, 2, 3]), ((lambda a, b, c: [a, b, c],), [(3,), (2,), (1,)], [1, 2, 3]), ((lambda *a: sum(a), 3), [(1, 1, 1)], 3), ((lambda *a: sum(a), 3), [(1,), (1,), (1,)], 3), ], ) def test_curry_right(case, arglist, expected): curried = _.curry_right(*case) # Run test twice to verify curried can be reused for _x in range(2): ret = curried for args in arglist: ret = ret(*args) assert ret == expected def test_debounce(): def func(): return _.now() wait = 250 debounced = _.debounce(func, wait) start = _.now() present = _.now() expected = debounced() while (present - start) <= wait + 100: result = debounced() present = _.now() assert result == expected time.sleep(wait / 1000.0) result = debounced() assert result > expected def test_debounce_max_wait(): def func(): return _.now() wait = 250 max_wait = 300 debounced = _.debounce(func, wait, max_wait=max_wait) start = _.now() present = _.now() expected = debounced() while (present - start) <= (max_wait + 5): result = debounced() present = _.now() assert result > expected @parametrize( "func,wait,args,kwargs,expected", [(lambda a, b, c: (a, b, c), 250, (1, 2), {"c": 3}, (1, 2, 3))], ) def test_delay(mock_sleep, func, wait, args, kwargs, expected): result = _.delay(func, wait, *args, **kwargs) assert result == expected assert mock_sleep.call_args_list == [mock.call(pytest.approx(wait / 1000.0))] @parametrize( "case,arg,expected", [ ((_.is_boolean, _.is_empty), [False, True], True), ((_.is_boolean, _.is_empty), [False, None], True), ((_.is_string, _.is_number), ["one", 1, "two", 2], True), ((_.is_string, _.is_number), [True, False, None, []], False), ], ) def test_disjoin(case, arg, expected): assert _.disjoin(*case)(arg) == expected @parametrize( "case,args,expected", [ (lambda args: args, (1, 2, 3), (3, 2, 1)), (lambda args: [i * 2 for i in args], (1, 2, 3), [6, 4, 2]), ], ) def flip(case, args, expected): func = _.flip(case) assert func(args) == expected @parametrize( "case,args,expected", [ ((lambda x: "!!!" 
+ x + "!!!", lambda x: f"Hi {x}"), ("Bob",), "Hi !!!Bob!!!"), ((lambda x: x + x, lambda x: x * x), (5,), 100), ], ) def test_flow(case, args, expected): assert _.flow(*case)(*args) == expected @parametrize( "case,args,expected", [ ((lambda x: f"Hi {x}", lambda x: "!!!" + x + "!!!"), ("Bob",), "Hi !!!Bob!!!"), ((lambda x: x + x, lambda x: x * x), (5,), 50), ], ) def test_flow_right(case, args, expected): assert _.flow_right(*case)(*args) == expected @parametrize( "func,args,expected", [ (lambda x: x + x, (2, 0), 2), (lambda x: x + x, (2, 1), 4), (lambda x: x + x, (2, 2), 8), (lambda x: x + x, (2, 3), 16), ], ) def test_iterated(func, args, expected): assert _.iterated(func)(*args) == expected @parametrize( "funcs,args,expected", [ ((lambda a: a[0], lambda a: a[-1]), ("Foobar",), ["F", "r"]), ( (lambda a, b: a[0] + b[-1], lambda a, b: a[-1] + b[0]), ("Foobar", "Barbaz"), ["Fz", "rB"], ), ], ) def test_juxtapose(funcs, args, expected): assert _.juxtapose(*funcs)(*args) == expected @parametrize( "func,args", [ (lambda item: item, (True,)), (lambda item: item, (False,)), ], ) def test_negate(func, args): assert _.negate(func)(*args) == (not func(*args)) @parametrize("case,arglist,expected", [(lambda a: a * a, [(2,), (4,)], 4)]) def test_once(case, arglist, expected): fn = _.once(case) for args in arglist: assert fn(*args) == expected @parametrize( "func,transforms,args,expected", [ (lambda a, b: [a, b], [lambda x: x**2, lambda x: x * 2], (5, 10), [25, 20]), (lambda a, b: [a, b], ([lambda x: x**2, lambda x: x * 2],), (5, 10), [25, 20]), ], ) def test_over_args(func, transforms, args, expected): assert _.over_args(func, *transforms)(*args) == expected @parametrize( "case,case_args,case_kwargs,args,expected", [ (lambda a, b, c: a + b + c, ("a", "b"), {}, ("c",), "abc"), (lambda a, b, c: a + b + c, ("a",), {"c": "d"}, ("b",), "abd"), ], ) def test_partial(case, case_args, case_kwargs, args, expected): assert _.partial(case, *case_args, **case_kwargs)(*args) == expected def test_partial_as_iteratee(): func = _.partial(lambda offset, value, *args: value + offset, 5) case = [1, 2, 3] expected = [6, 7, 8] assert _.map_(case, func) == expected @parametrize( "case,case_args,case_kwargs,args,expected", [ (lambda a, b, c: a + b + c, ("a", "b"), {}, ("c",), "cab"), (lambda a, b, c: a + b + c, ("a",), {"c": "d"}, ("b",), "bad"), ], ) def test_partial_right(case, case_args, case_kwargs, args, expected): assert _.partial_right(case, *case_args, **case_kwargs)(*args) == expected @parametrize( "case,args,kwargs,expected", [ ((lambda a, b, c: [a, b, c], 2, 0, 1), ("b", "c", "a"), {}, ["a", "b", "c"]), ((lambda a, b, c: [a, b, c], [2, 0, 1]), ("b", "c", "a"), {}, ["a", "b", "c"]), ((lambda a, b, c: [a, b, c], 2, 1), ("b", "c", "a"), {}, ["a", "c", "b"]), ((lambda a, b, c: [a, b, c], 1), ("b", "c", "a"), {}, ["c", "b", "a"]), ((lambda a, b, c: [a, b, c], 3, 2, 0, 1), ("b", "c", "a"), {}, ["a", "b", "c"]), ], ) def test_rearg(case, args, kwargs, expected): assert _.rearg(*case)(*args, **kwargs) == expected @parametrize( "case,args,expected", [ (lambda *args: args, ["a", "b", "c"], ("a", "b", "c")), (lambda *args: ",".join(args), ["a", "b", "c"], "a,b,c"), (lambda a, b, c: f"{a} {b} {c}", [1, 2, 3], "1 2 3"), ], ) def test_spread(case, args, expected): assert _.spread(case)(args) == expected def test_throttle(): def func(): return _.now() wait = 250 throttled = _.throttle(func, wait) start = _.now() present = _.now() expected = throttled() while (present - start) < (wait - 50): result = throttled() present = _.now() 
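# Within the throttle window every call should keep returning the cached
# first result; a newer timestamp is only expected after the sleep below.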
assert result == expected time.sleep(100 / 1000.0) assert throttled() > expected @parametrize( "case,args,kwargs,expected", [ (lambda a=0, b=0, c=0, d=0: a + b + c + d, (1, 2, 3, 4), {}, 1), (lambda a=0, b=0, c=0, d=0: a + b + c + d, (1, 2, 3, 4), {"d": 10}, 11), ], ) def test_unary(case, args, kwargs, expected): assert _.unary(case)(*args, **kwargs) == expected @parametrize( "case,args,expected", [ ( (lambda a: a.strip(), lambda func, text: f"
<p>{func(text)}</p>"), (" hello world! ",), "<p>hello world!</p>
", ) ], ) def test_wrap(case, args, expected): assert _.wrap(*case)(*args) == expected def test_flow_argcount(): assert _.flow(lambda x, y: x + y, lambda x: x * 2)._argcount == 2 def test_flow_right_argcount(): assert _.flow_right(lambda x: x * 2, lambda x, y: x + y)._argcount == 2 def test_juxtapose_argcount(): assert _.juxtapose(lambda x, y, z: x + y + z, lambda x, y, z: x * y * z)._argcount == 3 def test_partial_argcount(): assert _.partial(lambda x, y, z: x + y + z, 1, 2)._argcount == 1 def test_partial_right_argcount(): assert _.partial_right(lambda x, y, z: x + y + z, 1, 2)._argcount == 1 def test_curry_argcount(): assert _.curry(lambda x, y, z: x + y + z)(1)._argcount == 2 def test_curry_right_argcount(): assert _.curry_right(lambda x, y, z: x + y + z)(1)._argcount == 2 def test_can_be_used_as_predicate_argcount_is_known(): def is_positive(x: int) -> bool: return x > 0 assert _.filter_([-1, 0, 1], _.negate(is_positive)) == [-1, 0] pydash-8.0.3/tests/test_numerical.py000066400000000000000000000170551464745015500175670ustar00rootroot00000000000000import pytest import pydash as _ parametrize = pytest.mark.parametrize @parametrize( "case,expected", [ ((5, 3), 8), ], ) def test_add(case, expected): assert _.add(*case) == expected @parametrize( "case,expected", [ ((4.006,), 5), ((6.004, 2), 6.01), ((6040, -2), 6100), (([4.006, 6.004], 2), [4.01, 6.01]), ], ) def test_ceil(case, expected): assert _.ceil(*case) == expected @parametrize( "case,expected", [ ((0, -1, 1), 0), ((1, -1, 1), 1), ((-1, -1, 1), -1), ((1, 1), 1), ((5, -1, 1), 1), ((-5, -1, 1), -1), ], ) def test_clamp(case, expected): assert _.clamp(*case) == expected @parametrize( "dividend,divisor,expected", [(10, 5, 2.0), (None, 1, 1.0), (None, None, 1.0), (1.5, 3, 0.5), (-10, 2, -5.0)], ) def test_divide(dividend, divisor, expected): assert _.divide(dividend, divisor) == expected @parametrize( "case,expected", [ ((4.006,), 4), ((0.046, 2), 0.04), ((4060, -2), 4000), (([4.006, 0.046], 2), [4.0, 0.04]), ], ) def test_floor(case, expected): assert _.floor(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3],), 3), (({"a": 3, "b": 2, "c": 1},), 3), ], ) def test_max_(case, expected): assert _.max_(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3],), 3), (({"a": 3, "b": 2, "c": 1},), 3), ((["anaconda", "bison", "camel"], lambda x: len(x)), "anaconda"), ( ( [{"name": "barney", "age": 36}, {"name": "fred", "age": 40}], "age", ), {"name": "fred", "age": 40}, ), ( ([{"name": "barney", "age": 36}, {"name": "fred", "age": 40}], lambda c: c["age"]), {"name": "fred", "age": 40}, ), ], ) def test_max_by(case, expected): assert _.max_by(*case) == expected @parametrize( "collection,default,expected", [([], -1, -1), ([1, 2, 3], -1, 3), ({}, -1, -1), ([], None, None), ({}, None, None)], ) def test_max_default(collection, default, expected): assert _.max_(collection, default=default) == expected @parametrize( "case,expected", [ ([1, 2, 3, 4, 5], 3), ([0, 0.5, 1], 0.5), ], ) def test_mean(case, expected): assert _.mean(case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5],), 3), (([{"b": 4}, {"b": 5}, {"b": 6}], "b"), 5), (([0, 0.5, 1],), 0.5), (({"one": {"a": 1}, "two": {"a": 2}, "three": {"a": 3}}, "a"), 2), ], ) def test_mean_by(case, expected): assert _.mean_by(*case) == expected @parametrize( "case,expected", [ (([0, 0, 0, 0, 5],), 0), (([0, 0, 1, 2, 5],), 1), (([0, 0, 1, 2],), 0.5), (([0, 0, 1, 2, 3, 4],), 1.5), ], ) def test_median(case, expected): assert _.median(*case) == expected @parametrize( 
"case,expected", [ (([1, 2, 3],), 1), (({"a": 3, "b": 2, "c": 1},), 1), ], ) def test_min_(case, expected): assert _.min_(*case) == expected @parametrize( "case,expected", [ (([1, 2, 3],), 1), (({"a": 3, "b": 2, "c": 1},), 1), ((["anaconda", "bison", "cat"], lambda x: len(x)), "cat"), ( ( [{"name": "barney", "age": 36}, {"name": "fred", "age": 40}], "age", ), {"name": "barney", "age": 36}, ), ( ([{"name": "barney", "age": 36}, {"name": "fred", "age": 40}], lambda c: c["age"]), {"name": "barney", "age": 36}, ), ], ) def test_min_by(case, expected): assert _.min_by(*case) == expected @parametrize( "collection,default,expected", [([], -1, -1), ([1, 2, 3], -1, 1), ({}, -1, -1), ([], None, None), ({}, None, None)], ) def test_min_default(collection, default, expected): assert _.min_(collection, default=default) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5], 3), [2, 3, 4]), (([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3), [2, 3, 4, 5, 6, 7, 8, 9]), (([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4), [2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5]), ], ) def test_moving_mean(case, expected): assert _.moving_mean(*case) == expected @parametrize( "multiplier,multiplicand,expected", [ (10, 5, 50), (None, 1, 1), (None, None, 1), (1.5, 3, 4.5), (-10, 2, -20), (0, 1, 0), (1, 0, 0), ], ) def test_multiply(multiplier, multiplicand, expected): assert _.multiply(multiplier, multiplicand) == expected @parametrize( "case,expected", [ ((2, 3), 8), ((3, 4), 81), (([1, 2, 3, 4, 5], 2), [1, 4, 9, 16, 25]), (("junk", 2), None), ], ) def test_power(case, expected): assert _.power(*case) == expected @parametrize( "case,expected", [ ((2.51,), 3), ((2.499,), 2), ((2.499, 2), 2.50), (([2.5, 2.499, 2.555], 2), [2.50, 2.50, 2.56]), (("junk",), None), ], ) def test_round_(case, expected): assert _.round_(*case) == expected @parametrize( "case,expected", [ (([2, 5, 10], 1), [0.2, 0.5, 1]), (([1, 2, 5], 1), [0.2, 0.4, 1]), (([1, 2, 5], 5), [1, 2, 5]), (([1, 2, 5],), [0.2, 0.4, 1]), ], ) def test_scale(case, expected): assert _.scale(*case) == expected @parametrize( "case,expected", [ (([0, 0], [5, 5]), 1), (([0, 0], [1, 10]), 10), (([0, 0], [0, 10]), float("inf")), (([0, 0], [10, 0]), 0), ], ) def test_slope(case, expected): assert _.slope(*case) == expected @parametrize( "case,expected", [ ([1, 2, 3], (2.0 / 3.0) ** 0.5), ], ) def test_std_deviation(case, expected): assert _.std_deviation(case) == expected @parametrize( "minuend,subtrahend,expected", [ (10, 4, 6), (-6, -4, -2), (4, -10, 14), (-10, 4, -14), ("10", "5", 5), (2, 0.5, 1.5), (None, None, 0), ], ) def test_subtract(minuend, subtrahend, expected): assert _.subtract(minuend, subtrahend) == expected @parametrize( "minuend,subtrahend", [ ("abs", 4), (4, "abc"), ("abs", "abc"), ([1, 2, 3, 4], 4), ], ) def test_subtract_exception(minuend, subtrahend): with pytest.raises(TypeError): _.subtract(minuend, subtrahend) @parametrize( "case,expected", [ ([1, 2, 3, 4, 5], 15), ([0, 14, 0.2], 14.2), ], ) def test_sum_(case, expected): assert _.sum_(case) == expected @parametrize( "case,expected", [ (([1, 2, 3, 4, 5], lambda a: a * 2), 30), (([{"b": 4}, {"b": 5}, {"b": 6}], "b"), 15), (({"one": {"a": 1}, "two": {"a": 2}, "three": {"a": 3}}, "a"), 6), ], ) def test_sum_by(case, expected): assert _.sum_by(*case) == expected @parametrize( "case,expected", [ ([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[1, 4, 7], [2, 5, 8], [3, 6, 9]]), ], ) def test_transpose(case, expected): assert _.transpose(case) == expected @parametrize( "case,expected", [ ([1, 2, 3], 2.0 / 3.0), ], ) def test_variance(case, 
expected): assert _.variance(case) == expected @parametrize( "case,expected", [ (([1, 2, 3],), [-1.225, 0.0, 1.225]), (([{"a": 1}, {"a": 2}, {"a": 3}], "a"), [-1.225, 0.0, 1.225]), ], ) def test_zscore(case, expected): assert _.map_(_.zscore(*case), lambda v: round(v, 3)) == expected pydash-8.0.3/tests/test_objects.py000066400000000000000000000710541464745015500172400ustar00rootroot00000000000000from argparse import Namespace from collections import defaultdict, namedtuple import datetime as dt import pytest import pydash as _ from . import helpers parametrize = pytest.mark.parametrize today = dt.date.today() SomeNamedTuple = namedtuple("SomeNamedTuple", ["a", "b"]) @parametrize( "case,expected", [ (({"name": "fred"}, {"employer": "slate"}), {"name": "fred", "employer": "slate"}), ( ({"name": "fred"}, {"employer": "slate"}, {"employer": "medium"}), {"name": "fred", "employer": "medium"}, ), ], ) def test_assign(case, expected): assert _.assign(*case) == expected @parametrize( "case,expected", [ (({"name": "fred"}, {"age": 26}, lambda obj, src: src + 1), {"name": "fred", "age": 27}), ], ) def test_assign_with(case, expected): assert _.assign_with(*case) == expected @parametrize( "case,expected", [ (({"name": "fred", "greet": lambda: "Hello, world!"},), ["greet"]), ((["fred", lambda: "Hello, world!"],), [1]), ], ) def test_callables(case, expected): assert _.callables(*case) == expected @parametrize( "case", [ {"a": {"d": 1}, "b": {"c": 2}}, [{"a": {"d": 1}, "b": {"c": 2}}], ], ) def test_clone(case): result = _.clone(case) assert result is not case for key, value in _.helpers.iterator(result): assert value is case[key] @parametrize( "case,iteratee,expected", [ ({"a": {"d": 1}, "b": {"c": 2}}, lambda v: v, {"a": {"d": 1}, "b": {"c": 2}}), ( {"a": 1, "b": 2, "c": {"d": 3}}, lambda v, k: v + 2 if isinstance(v, int) and k else None, {"a": 3, "b": 4, "c": {"d": 3}}, ), ], ) def test_clone_with(case, iteratee, expected): result = _.clone_with(case, iteratee) assert result == expected @parametrize( "case", [ {"a": {"d": 1}, "b": {"c": 2}}, {"a": {"d": 1}, "b": {"c": 2}}, [{"a": {"d": 1}, "b": {"c": 2}}], ], ) def test_clone_deep(case): result = _.clone_deep(case) assert result is not case for key, value in _.helpers.iterator(result): assert value is not case[key] @parametrize( "case,iteratee,expected", [ ({"a": {"d": 1}, "b": {"c": 2}}, lambda v: v, {"a": {"d": 1}, "b": {"c": 2}}), ( {"a": 1, "b": 2, "c": {"d": 3}}, lambda v, k: v + 2 if isinstance(v, int) and k else None, {"a": 3, "b": 4, "c": {"d": 5}}, ), (["a"], lambda a: None, ["a"]), ("a", lambda a: None, "a"), ], ) def test_clone_deep_with(case, iteratee, expected): result = _.clone_deep_with(case, iteratee) assert result == expected @parametrize( "case,expected", [ ( ({"name": "barney"}, {"name": "fred", "employer": "slate"}), {"name": "barney", "employer": "slate"}, ), ], ) def test_defaults(case, expected): assert _.defaults(*case) == expected @parametrize( "case,expected", [ ( ({"user": {"name": "barney"}}, {"user": {"name": "fred", "age": 36}}), {"user": {"name": "barney", "age": 36}}, ), (({}, {"a": {"b": ["c"]}}, {"a": {"b": ["d"]}}), {"a": {"b": ["c"]}}), ( ({"a": {"b": [{"d": "e"}]}}, {"a": {"b": [{"d": "f"}]}}, {"a": {"b": [{"g": "h"}]}}), {"a": {"b": [{"d": "e", "g": "h"}]}}, ), ( ( {"a": {"b": [{"d": "e"}]}}, {"a": {"b": [{"d": "f"}, {"g": "h"}]}}, {"a": {"b": [{"i": "j"}]}}, ), {"a": {"b": [{"d": "e", "i": "j"}]}}, ), ( ( {"a": {"b": [{"d": "e"}, {"x": "y"}]}}, {"a": {"b": [{"d": "f"}, {"g": "h"}]}}, {"a": {"b": [{"i": "j"}]}}, 
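# Keys already present in the first source ("d": "e", "x": "y") win; later
# sources only contribute keys that are still missing, which is what the
# merged list elements in the expected value check.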
), {"a": {"b": [{"d": "e", "i": "j"}, {"x": "y", "g": "h"}]}}, ), ], ) def test_defaults_deep(case, expected): assert _.defaults_deep(*case) == expected @parametrize( "case,expected", [ ([1, 2, 3], {0: 1, 1: 2, 2: 3}), ({0: 1, 1: 2, 2: 3}, {0: 1, 1: 2, 2: 3}), ], ) def test_to_dict(case, expected): assert _.to_dict(case) == expected @parametrize( "case,expected", [ ({"a": 1, "b": 2, "c": 3}, {1: "a", 2: "b", 3: "c"}), ([1, 2, 3], {1: 0, 2: 1, 3: 2}), ], ) def test_invert(case, expected): assert _.invert(case) == expected @parametrize( "case,expected", [ (([1, 2, 3],), {1: [0], 2: [1], 3: [2]}), ( ({"first": "fred", "second": "barney", "third": "fred"},), {"fred": ["first", "third"], "barney": ["second"]}, ), (({"a": 1, "b": 2}, lambda val: val * 2), {2: ["a"], 4: ["b"]}), ], ) def test_invert_by(case, expected): result = _.invert_by(*case) for key in result: assert set(result[key]) == set(expected[key]) @parametrize( "case,expected", [ (({"a": 1, "b": 2}, "get", "a"), 1), (({"a": {"b": {"c": [1, 2, 3, 3]}}}, "a.b.c.count", 3), 2), (({}, "count"), None), ], ) def test_invoke(case, expected): assert _.invoke(*case) == expected @parametrize( "case,expected", [ # NOTE: The expected is a list of values but find_key returns only a single # value. However, since dicts do not have an order, it's unknown what the # "first" returned value will be. ( ( { "barney": {"age": 36, "blocked": False}, "fred": {"age": 40, "blocked": True}, "pebbles": {"age": 1, "blocked": False}, }, lambda obj: obj["age"] < 40, ), ["pebbles", "barney"], ), ( ( { "barney": {"age": 36, "blocked": False}, "fred": {"age": 40, "blocked": True}, "pebbles": {"age": 1, "blocked": False}, }, ), ["barney", "fred", "pebbles"], ), (([1, 2, 3],), [0]), ], ) def test_find_key(case, expected): assert _.find_key(*case) in expected @parametrize( "case,expected", [ # NOTE: The expected is a list of values but find_last_key returns only a # single value. However, since dicts do not have an order, it's unknown # what the "first" returned value will be. 
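# For example, with the users mapping below, find_last_key(users,
# lambda obj: obj["age"] < 40) may return either "pebbles" or "barney"
# depending on iteration order, so the assertion only checks membership.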
( ( { "barney": {"age": 36, "blocked": False}, "fred": {"age": 40, "blocked": True}, "pebbles": {"age": 1, "blocked": False}, }, lambda obj: obj["age"] < 40, ), ["pebbles", "barney"], ), ( ( { "barney": {"age": 36, "blocked": False}, "fred": {"age": 40, "blocked": True}, "pebbles": {"age": 1, "blocked": False}, }, ), ["barney", "fred", "pebbles"], ), (([1, 2, 3],), [2]), ], ) def test_find_last_key(case, expected): assert _.find_last_key(*case) in expected @parametrize( "case,expected", [ ( ({"name": "fred", "employer": "slate"}, helpers.for_in_iteratee0), ({"name": "fredfred", "employer": "slateslate"},), ), ( ({"name": "fred", "employer": "slate"}, helpers.for_in_iteratee1), ({"name": "fredfred", "employer": "slate"}, {"name": "fred", "employer": "slateslate"}), ), (([1, 2, 3], helpers.for_in_iteratee2), ([False, True, 3],)), ], ) def test_for_in(case, expected): assert _.for_in(*case) in expected @parametrize( "case,expected", [ ( ({"name": "fred", "employer": "slate"}, helpers.for_in_iteratee0), ({"name": "fredfred", "employer": "slateslate"},), ), ( ({"name": "fred", "employer": "slate"}, helpers.for_in_iteratee1), ({"name": "fredfred", "employer": "slate"}, {"name": "fred", "employer": "slateslate"}), ), (([1, 2, 3], helpers.for_in_iteratee2), ([1, True, "index:2"],)), ], ) def test_for_in_right(case, expected): assert _.for_in_right(*case) in expected @parametrize( "case,expected", [ (({"one": {"two": {"three": 4}}}, "one.two"), {"three": 4}), (({"one": {"two": {"three": 4}}}, "one.two.three"), 4), (({"one": {"two": {"three": 4}}}, ["one", "two"]), {"three": 4}), (({"one": {"two": {"three": 4}}}, ["one", "two", "three"]), 4), (({"one": {"two": {"three": 4}}}, "one.four"), None), (({"one": {"two": {"three": 4}}}, "one.four.three", []), []), (({"one": {"two": {"three": 4}}}, "one.four.0.a", [{"a": 1}]), [{"a": 1}]), (({"one": {"two": {"three": [{"a": 1}]}}}, "one.four.three.0.a", []), []), (({"one": {"two": {"three": 4}}}, "one.four.three"), None), (({"one": {"two": {"three": [{"a": 1}]}}}, "one.four.three.0.a"), None), (({"one": {"two": {"three": 4}}}, "one.four.three", 2), 2), (({"one": {"two": {"three": [{"a": 1}]}}}, "one.four.three.0.a", 2), 2), (({"one": {"two": {"three": 4}}}, "one.four.three", {"test": "value"}), {"test": "value"}), ( ({"one": {"two": {"three": [{"a": 1}]}}}, "one.four.three.0.a", {"test": "value"}), {"test": "value"}, ), (({"one": {"two": {"three": 4}}}, "one.four.three", "haha"), "haha"), (({"one": {"two": {"three": [{"a": 1}]}}}, "one.four.three.0.a", "haha"), "haha"), (({"one": {"two": {"three": 4}}}, "five"), None), (({"one": ["two", {"three": [4, 5]}]}, ["one", 1, "three", 1]), 5), (({"one": ["two", {"three": [4, 5]}]}, "one.[1].three.[1]"), 5), (({"one": ["two", {"three": [4, 5]}]}, "one.1.three.1"), 5), ((["one", {"two": {"three": [4, 5]}}], "[1].two.three.[0]"), 4), ((["one", {"two": {"three": [4, [{"four": [5]}]]}}], "[1].two.three[1][0].four[0]"), 5), ((["one", {"two": {"three": [4, [{"four": [5]}], 6]}}], "[1].two.three[-2][0].four[0]"), 5), ((range(50), "[42]"), 42), ((range(50), "[-1]"), 49), (([[[[[[[[[[42]]]]]]]]]], "[0][0][0][0][0][0][0][0][0][0]"), 42), (([range(50)], "[0][42]"), 42), (({"a": [{"b": range(50)}]}, "a[0].b[42]"), 42), ( ({"lev.el1": {"lev\\el2": {"level3": ["value"]}}}, "lev\\.el1.lev\\\\el2.level3.[0]"), "value", ), (({"one": ["hello", "there"]}, "one.bad.hello", []), []), (({"one": ["hello", None]}, "one.1.hello"), None), ((SomeNamedTuple(1, 2), "a"), 1), ((SomeNamedTuple(1, 2), 0), 1), ((SomeNamedTuple({"c": {"d": 1}}, 2), 
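# get also traverses namedtuple fields, by name ("a") or index (0), and can
# continue into nested containers along a dotted path such as "a.c.d".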
"a.c.d"), 1), (({}, "update"), None), (([], "extend"), None), (({(1,): {(2,): 3}}, (1,)), {(2,): 3}), (({(1,): {(2,): 3}}, [(1,), (2,)]), 3), (({object: 1}, object), 1), (({object: {object: 1}}, [object, object]), 1), (({1: {"name": "John Doe"}}, "1.name"), "John Doe"), ((helpers.Object(), "[0].field"), None), ], ) def test_get(case, expected): assert _.get(*case) == expected def test_get__should_not_populate_defaultdict(): data = defaultdict(list) _.get(data, "a") assert data == {} @parametrize( "obj,path", [ (helpers.Object(), "__init__.__globals__"), (SomeNamedTuple(1, 2), "__globals__"), (helpers.Object(subobj=helpers.Object()), "subobj.__builtins__"), (helpers.Object(subobj=helpers.Object()), "__builtins__"), ], ) def test_get__raises_for_objects_when_path_restricted(obj, path): with pytest.raises(KeyError, match="access to restricted key"): _.get(obj, path) @parametrize( "obj,path", [ ({}, "__globals__"), ({}, "__builtins__"), ([], "__globals__"), ([], "__builtins__"), ], ) def test_get__does_not_raise_for_dict_or_list_when_path_restricted(obj, path): assert _.get(obj, path) is None @parametrize( "obj,path", [ (helpers.Object(), "__name__"), (helpers.Object(), "foo.__dict__"), (helpers.Object(), "__len__"), ], ) def test_get__does_not_raise_for_objects_when_path_is_unrestricted(obj, path): assert _.get(obj, path) is None @parametrize( "case,expected", [ (({"a": 1, "b": 2, "c": 3}, "b"), True), (([1, 2, 3], 0), True), (([1, 2, 3], 1), True), (([1, 2, 3], 3), False), (({"a": 1, "b": 2, "c": 3}, "b"), True), (([1, 2, 3], 0), True), (([1, 2, 3], 1), True), (([1, 2, 3], 3), False), (({"one": {"two": {"three": 4}}}, "one.two"), True), (({"one": {"two": {"three": 4}}}, "one.two.three"), True), (({"one": {"two": {"three": 4}}}, ["one", "two"]), True), (({"one": {"two": {"three": 4}}}, ["one", "two", "three"]), True), (({"one": {"two": {"three": 4}}}, "one.four"), False), (({"one": {"two": {"three": 4}}}, "five"), False), (({"one": {"two": {"three": 4}}}, "one.four.three"), False), (({"one": {"two": {"three": [{"a": 1}]}}}, "one.four.three.0.a"), False), (({"one": ["two", {"three": [4, 5]}]}, ["one", 1, "three", 1]), True), (({"one": ["two", {"three": [4, 5]}]}, "one.[1].three.[1]"), True), (({"one": ["two", {"three": [4, 5]}]}, "one.1.three.1"), True), ((["one", {"two": {"three": [4, 5]}}], "[1].two.three.[0]"), True), (({"lev.el1": {r"lev\el2": {"level3": ["value"]}}}, r"lev\.el1.lev\\el2.level3.[0]"), True), ], ) def test_has(case, expected): assert _.has(*case) == expected def test_has__should_not_populate_defaultdict(): data = defaultdict(list) _.has(data, "a") assert data == {} @parametrize("case,expected", [({"a": 1, "b": 2, "c": 3}, ["a", "b", "c"]), ([1, 2, 3], [0, 1, 2])]) def test_keys(case, expected): assert set(_.keys(case)) == set(expected) @parametrize( "case,expected", [ (({"a": 1, "b": 2, "c": 3}, lambda num: num * 3), {"a": 3, "b": 6, "c": 9}), ( ( {"fred": {"name": "fred", "age": 40}, "pebbles": {"name": "pebbles", "age": 1}}, "age", ), {"fred": 40, "pebbles": 1}, ), ], ) def test_map_values(case, expected): assert _.map_values(*case) == expected @parametrize( "case,expected", [ ( ( { "level1": { "value": "value 1", "level2": {"value": "value 2", "level3": {"value": "value 3"}}, } }, lambda value, property_path: ".".join(property_path) + "==" + value, ), { "level1": { "value": "level1.value==value 1", "level2": { "value": "level1.level2.value==value 2", "level3": {"value": "level1.level2.level3.value==value 3"}, }, } }, ), ( ( [["value 1", [["value 2", ["value 3"]]]]], 
lambda value, property_path: (_.join(property_path, ".") + "==" + value), ), [["0.0==value 1", [["0.1.0.0==value 2", ["0.1.0.1.0==value 3"]]]]], ), ], ) def test_map_values_deep(case, expected): assert _.map_values_deep(*case) == expected @parametrize( "case,expected", [ ( ( {"characters": [{"name": "barney"}, {"name": "fred"}]}, {"characters": [{"age": 36}, {"age": 40}]}, ), {"characters": [{"name": "barney", "age": 36}, {"name": "fred", "age": 40}]}, ), ( ( {"characters": [{"name": "barney"}, {"name": "fred"}, {}]}, {"characters": [{"age": 36}, {"age": 40}]}, ), {"characters": [{"name": "barney", "age": 36}, {"name": "fred", "age": 40}, {}]}, ), ( ( {"characters": [{"name": "barney"}, {"name": "fred"}]}, {"characters": [{"age": 36}, {"age": 40}, {}]}, ), {"characters": [{"name": "barney", "age": 36}, {"name": "fred", "age": 40}, {}]}, ), ( ( {"characters": [{"name": "barney"}, {"name": "fred"}]}, {"characters": [{"age": 36}, {"age": 40}]}, {"characters": [{"score": 5}, {"score": 7}]}, ), { "characters": [ {"name": "barney", "age": 36, "score": 5}, {"name": "fred", "age": 40, "score": 7}, ] }, ), ( ( {"characters": {"barney": {"age": 36}, "fred": {"score": 7}}}, {"characters": {"barney": {"score": 5}, "fred": {"age": 40}}}, ), {"characters": {"barney": {"age": 36, "score": 5}, "fred": {"age": 40, "score": 7}}}, ), ( ( {"characters": {"barney": {"age": 36}, "fred": {"score": 7}}}, {"characters": {"barney": [5], "fred": 7}}, ), {"characters": {"barney": [5], "fred": 7}}, ), ( ( {"characters": {"barney": {"age": 36}, "fred": {"score": 7}}}, {"foo": {"barney": [5], "fred": 7}}, ), { "characters": {"barney": {"age": 36}, "fred": {"score": 7}}, "foo": {"barney": [5], "fred": 7}, }, ), (({"foo": {"bar": 1}}, {"foo": {}}), {"foo": {"bar": 1}}), (({},), {}), (([],), []), ((None,), None), ((None, {"a": 1}), None), ((None, None, None, {"a": 1}), None), (({"a": 1}, None), {"a": 1}), (({"a": 1}, None, None, None, {"b": 2}), {"a": 1, "b": 2}), (({"a": None}, None, None, None, {"b": None}), {"a": None, "b": None}), ], ) def test_merge(case, expected): assert _.merge(*case) == expected def test_merge_no_link_dict(): case1 = {"foo": {"bar": None}} case2 = {"foo": {"bar": False}} result = _.merge({}, case1, case2) result["foo"]["bar"] = True assert case1 == {"foo": {"bar": None}} assert case2 == {"foo": {"bar": False}} def test_merge_no_link_list(): case = {"foo": [{}]} result = _.merge({}, case) result["foo"][0]["bar"] = True assert case == {"foo": [{}]} @parametrize( "case,expected", [ ( ( {"fruits": ["apple"], "others": {"vegetables": ["beet"]}}, {"fruits": ["banana"], "others": {"vegetables": ["carrot"]}}, lambda a, b: a + b if isinstance(a, list) else None, ), {"fruits": ["apple", "banana"], "others": {"vegetables": ["beet", "carrot"]}}, ), ], ) def test_merge_with(case, expected): assert _.merge_with(*case) == expected @parametrize( "case,expected", [ (({"a": 1, "b": 2, "c": 3}, "a"), {"b": 2, "c": 3}), (({"a": 1, "b": 2, "c": 3}, "a", "b"), {"c": 3}), (({"a": 1, "b": 2, "c": 3}, ["a", "b"]), {"c": 3}), (({"a": 1, "b": 2, "c": 3}, ["a"], ["b"]), {"c": 3}), (([1, 2, 3],), {0: 1, 1: 2, 2: 3}), (([1, 2, 3], 0), {1: 2, 2: 3}), (([1, 2, 3], 0, 1), {2: 3}), (({"a": {"b": {"c": "d"}}, "e": "f"}, "a.b.c", "e"), {"a": {"b": {}}}), (({"a": [{"b": 1, "c": 2}, {"d": 3}]}, "a[0].c", "a[1].d"), {"a": [{"b": 1}, {}]}), ], ) def test_omit(case, expected): assert _.omit(*case) == expected @parametrize( "case,expected", [ (({"a": 1, "b": 2, "c": 3}, ["a", "b"]), {"c": 3}), (({"a": 1, "b": 2, "c": 3}, lambda value, 
key: key == "a"), {"b": 2, "c": 3}), (([1, 2, 3],), {0: 1, 1: 2, 2: 3}), (([1, 2, 3], [0]), {1: 2, 2: 3}), ], ) def test_omit_by(case, expected): assert _.omit_by(*case) == expected @parametrize( "case,expected", [ ((1,), 1), ((1.0,), 1), (("1",), 1), (("00001",), 1), ((13, 8), 11), (("0A",), 10), (("08",), 8), (("10",), 16), (("10", 10), 10), (("xyz",), None), ], ) def test_parse_int(case, expected): assert _.parse_int(*case) == expected @parametrize( "case,expected", [ (({"a": 1, "b": 2, "c": 3}, "a"), {"a": 1}), (({"a": 1, "b": 2, "c": 3}, "a", "b"), {"a": 1, "b": 2}), (({"a": 1, "b": 2, "c": 3}, ["a", "b"]), {"a": 1, "b": 2}), (({"a": 1, "b": 2, "c": 3}, ["a"], ["b"]), {"a": 1, "b": 2}), (([1, 2, 3],), {}), (([1, 2, 3], 0), {0: 1}), ((helpers.Object(a=1, b=2, c=3), "a"), {"a": 1}), ((helpers.ItemsObject({"a": 1, "b": 2, "c": 3}), "a"), {"a": 1}), ((helpers.IteritemsObject({"a": 1, "b": 2, "c": 3}), "a"), {"a": 1}), (({"a": {"b": 1, "c": 2, "d": 3}}, "a.b", "a.d"), {"a": {"b": 1, "d": 3}}), ( ({"a": [{"b": 1}, {"c": 2}, {"d": 3}]}, "a[0]", "a[2]"), {"a": [{"b": 1}, None, {"d": 3}]}, ), ], ) def test_pick(case, expected): assert _.pick(*case) == expected @parametrize( "case,expected", [ (({"a": 1, "b": 2, "c": 3}, ["a", "b"]), {"a": 1, "b": 2}), (({"a": 1, "b": 2, "c": 3}, lambda value, key: key in ["a"]), {"a": 1}), (([1, 2, 3],), {0: 1, 1: 2, 2: 3}), (([1, 2, 3], [0]), {0: 1}), ((helpers.Object(a=1, b=2, c=3), "a"), {"a": 1}), ((helpers.ItemsObject({"a": 1, "b": 2, "c": 3}), "a"), {"a": 1}), ((helpers.IteritemsObject({"a": 1, "b": 2, "c": 3}), "a"), {"a": 1}), ], ) def test_pick_by(case, expected): assert _.pick_by(*case) == expected @parametrize( "case,expected", [ (({"a": 1, "b": 2}, {"a": "A", "b": "B"}), {"A": 1, "B": 2}), (({"a": 1, "b": 2}, {"a": "A"}), {"A": 1, "b": 2}), (({"a": 1, "b": 2}, {"c": "C", "b": "B"}), {"a": 1, "B": 2}), ], ) def test_rename_keys(case, expected): assert _.rename_keys(*case) == expected @parametrize( "case,expected", [ (({}, ["one", "two", "three", "four"], 1), {"one": {"two": {"three": {"four": 1}}}}), (({}, "one.two.three.four", 1), {"one": {"two": {"three": {"four": 1}}}}), ( ({"one": {"two": {}, "three": {}}}, ["one", "two", "three", "four"], 1), {"one": {"two": {"three": {"four": 1}}, "three": {}}}, ), ( ({"one": {"two": {}, "three": {}}}, "one.two.three.four", 1), {"one": {"two": {"three": {"four": 1}}, "three": {}}}, ), (({}, "one", 1), {"one": 1}), (([], [0, 0, 0], 1), [[[1]]]), (([], "[0].[0].[0]", 1), [[[1]]]), (([1, 2, [3, 4, [5, 6]]], [2, 2, 1], 7), [1, 2, [3, 4, [5, 7]]]), (([1, 2, [3, 4, [5, 6]]], "[2].[2].[1]", 7), [1, 2, [3, 4, [5, 7]]]), (([1, 2, [3, 4, [5, 6]]], [2, 2, 2], 7), [1, 2, [3, 4, [5, 6, 7]]]), (([1, 2, [3, 4, [5, 6]]], "[2].[2].[2]", 7), [1, 2, [3, 4, [5, 6, 7]]]), (({}, "a.b[0].c", 1), {"a": {"b": [{"c": 1}]}}), (({}, "a.b[0][0].c", 1), {"a": {"b": [[{"c": 1}]]}}), (({}, "a", tuple), {"a": tuple}), (({}, r"a.b\.c.d", 1), {"a": {"b.c": {"d": 1}}}), ], ) def test_set_(case, expected): assert _.set_(*case) == expected def test_set_on_class_works_the_same_with_string_and_list(): class A: def __init__(self): self.x = {} a1 = A() a2 = A() assert _.set_(a1, "x.a.b", 1).x == _.set_(a2, ["x", "a", "b"], 1).x @parametrize( "case,expected", [ (({}, "[0][1]", "a", lambda: {}), {0: {1: "a"}}), (({}, "[0][1]", dict, lambda: {}), {0: {1: dict}}), ((Namespace(), "a.b", 5, lambda: Namespace()), Namespace(a=Namespace(b=5))), ( (Namespace(a=Namespace(b=5)), "a.c.d", 55, lambda: Namespace()), Namespace(a=Namespace(b=5, c=Namespace(d=55))), ), 
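# set_with builds missing intermediate containers via the customizer, which is
# why the Namespace cases above end up nesting Namespace instances, not dicts.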
], ) def test_set_with(case, expected): assert _.set_with(*case) == expected @parametrize( "case,expected", [ (("1",), True), (("0",), False), (("true",), True), (("True",), True), (("false",), False), (("False",), False), (("",), None), (("a",), None), ((0,), False), ((1,), True), (([],), False), ((True,), True), ((False,), False), ((None,), False), (("Truthy", ["truthy"]), True), (("Falsey", [], ["falsey"]), False), (("foobar", ["^[f]"]), True), (("ofobar", ["^[f]"]), None), (("foobar", [], [".+[r]$"]), False), (("foobra", [], [".+[r]$"]), None), ], ) def test_to_boolean(case, expected): assert _.to_boolean(*case) is expected @parametrize( "case,expected", [ (1.4, 1), (1.9, 1), ("1.4", 1), ("1.9", 1), ("foo", 0), (None, 0), (True, 1), (False, 0), ({}, 0), ([], 0), ((), 0), ], ) def test_to_integer(case, expected): assert _.to_integer(case) == expected @parametrize( "case,expected", [(("2.556",), 3.0), (("2.556", 1), 2.6), (("999.999", -1), 990.0), (("foo",), None)], ) def test_to_number(case, expected): assert _.to_number(*case) == expected @parametrize( "case,expected", [ ({"a": 1, "b": 2, "c": 3}, [("a", 1), ("b", 2), ("c", 3)]), ([1, 2, 3], [(0, 1), (1, 2), (2, 3)]), ], ) def test_to_pairs(case, expected): assert dict(_.to_pairs(case)) == dict(expected) @parametrize( "case,expected", [ (1, "1"), (1.25, "1.25"), (True, "True"), ([1], "[1]"), ("d\xc3\xa9j\xc3\xa0 vu", "d\xc3\xa9j\xc3\xa0 vu"), ("", ""), (None, ""), (today, str(today)), ], ) def test_to_string(case, expected): assert _.to_string(case) == expected @parametrize( "case,expected", [ ( ([1, 2, 3, 4, 5], lambda acc, value, key: acc.append((key, value))), [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], ), (([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], helpers.transform_iteratee0), [1, 9, 25]), (([1, 2, 3, 4, 5],), []), ], ) def test_transform(case, expected): assert _.transform(*case) == expected @parametrize( "case,expected", [ ( ( {"rome": "Republic"}, ["rome"], lambda value: "Empire" if value == "Republic" else value, ), {"rome": "Empire"}, ), (({}, ["rome"], lambda value: "Empire" if value == "Republic" else value), {"rome": None}), ( ( {"earth": {"rome": "Republic"}}, ["earth", "rome"], lambda value: "Empire" if value == "Republic" else value, ), {"earth": {"rome": "Empire"}}, ), ], ) def test_update(case, expected): assert _.update(*case) == expected @parametrize( "case,expected", [ (({}, "[0][1]", _.constant("a"), lambda *_: {}), {0: {1: "a"}}), (({}, "[0][1]", _.constant("a"), {}), {0: {1: "a"}}), (({}, "[0][1]", "a", {}), {0: {1: "a"}}), ], ) def test_update_with(case, expected): assert _.update_with(*case) == expected @parametrize( "obj,path,expected,new_obj", [ ({"a": [{"b": {"c": 7}}]}, "a.0.b.c", True, {"a": [{"b": {}}]}), ([1, 2, 3], "1", True, [1, 3]), ([1, 2, 3], 1, True, [1, 3]), ([1, [2, 3]], [1, 1], True, [1, [2]]), ([1, 2, 3], "[0][0]", False, [1, 2, 3]), ([1, 2, 3], "[0][0][0]", False, [1, 2, 3]), ], ) def test_unset(obj, path, expected, new_obj): assert _.unset(obj, path) == expected assert obj == new_obj @parametrize("case,expected", [({"a": 1, "b": 2, "c": 3}, [1, 2, 3]), ([1, 2, 3], [1, 2, 3])]) def test_values(case, expected): assert set(_.values(case)) == set(expected) @parametrize("case,expected", [((5, lambda x: x * 2), 10)]) def test_apply(case, expected): assert _.apply(*case) == expected @parametrize( "case,expected", [((5, lambda x: x * 2, lambda x: x == 5), 10), ((5, lambda x: x * 2, lambda x: x == 10), 5)], ) def test_apply_if(case, expected): assert _.apply_if(*case) == expected @parametrize("case,expected", 
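# apply_if_not_none only invokes the callable for non-None input; None falls
# through unchanged, as the second case below checks.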
[((5, lambda x: x * 2), 10), ((None, lambda x: x * 2), None)]) def test_apply_if_not_none(case, expected): assert _.apply_if_not_none(*case) == expected @parametrize( "case,expected", [((5, lambda x: x * 2, [ValueError]), 10), ((5, lambda x: x / 0, [ZeroDivisionError]), 5)], ) def test_apply_catch(case, expected): assert _.apply_catch(*case) == expected pydash-8.0.3/tests/test_predicates.py000066400000000000000000000317441464745015500177340ustar00rootroot00000000000000import datetime import decimal import operator import re import pytest import pydash as _ from . import helpers parametrize = pytest.mark.parametrize @parametrize( "value,other,expected", [("a", "a", True), (None, None, True), (None, "", False), (1, str(1), False)], ) def test_eq(value, other, expected): assert _.eq(value, other) == expected assert _.eq_cmp(other)(value) == expected @parametrize( "case,expected", [ ((2, 1), True), ((2.5, 2.3), True), ((1, 2), False), ((2.3, 2.5), False), ((1, 1), False), ], ) def test_gt(case, expected): assert _.gt(*case) == expected assert _.gt_cmp(case[1])(case[0]) == expected @parametrize( "case,expected", [ ((2, 1), True), ((2.5, 2.3), True), ((1, 2), False), ((2.3, 2.5), False), ((1, 1), True), ], ) def test_gte(case, expected): assert _.gte(*case) == expected assert _.gte_cmp(case[1])(case[0]) == expected @parametrize( "case,expected", [ ((2, 1), False), ((2.5, 2.3), False), ((1, 2), True), ((2.3, 2.5), True), ((1, 1), False), ], ) def test_lt(case, expected): assert _.lt(*case) == expected assert _.lt_cmp(case[1])(case[0]) == expected @parametrize( "case,expected", [ ((2, 1), False), ((2.5, 2.3), False), ((1, 2), True), ((2.3, 2.5), True), ((1, 1), True), ], ) def test_lte(case, expected): assert _.lte(*case) == expected assert _.lte_cmp(case[1])(case[0]) == expected @parametrize( "case,expected", [ ((3, 2, 4), True), ((4, 8), True), ((4, 2), False), ((2, 2), False), ((1.2, 2), True), ((5.2, 4), False), (("", 5), False), ((2, ""), False), ((-1, -2, ""), True), ], ) def test_in_range(case, expected): assert _.in_range(*case) == expected assert _.in_range_cmp(*case[1:])(case[0]) == expected @parametrize( "case,expected", [ ([], True), ({}, True), ("", True), (0, False), (True, False), ], ) def test_is_associative(case, expected): assert _.is_associative(case) == expected @parametrize( "case,expected", [ ("", True), ("\n", True), (" ", True), ("a", False), ], ) def test_is_blank(case, expected): assert _.is_blank(case) == expected @parametrize( "case,expected", [ (True, True), (False, True), (0, False), ("", False), ], ) def test_is_boolean(case, expected): assert _.is_boolean(case) == expected @parametrize( "case,expected", [ (list, True), (dict, True), ([].append, True), ({}.update, True), (1, True), (True, True), (None, True), (datetime, False), (parametrize, False), ("a", False), (lambda: None, False), ], ) def test_is_builtin(case, expected): assert _.is_builtin(case) == expected @parametrize( "case,expected", [ (datetime.date.today(), True), (datetime.datetime.today(), True), ("2014-01-01", False), ("2014-01-01T00:00:00", False), (1, False), ], ) def test_is_date(case, expected): assert _.is_date(case) == expected @parametrize( "case,expected", [ ([3, 2, 1], True), ([6, 5, 5], True), (0, True), ([5, 4, 4, 3, 1], True), ([5, 4, 4, 5, 4, 3], False), ], ) def test_is_decreasing(case, expected): assert _.is_decreasing(case) == expected @parametrize( "case,expected", [({}, True), ([], False), (1, False), ("a", False), (iter([]), False), (iter({}), False)], ) def test_is_dict(case, 
expected): assert _.is_dict(case) == expected @parametrize( "case,expected", [ (True, True), (0, True), (123.45, True), ("", True), ({}, True), ([], True), (False, True), (None, True), ({"a": 1}, False), ([1], False), ("Hello", False), (["Hello", "World"], False), ], ) def test_is_empty(case, expected): assert _.is_empty(case) == expected @parametrize( "case,expected", [ ((1, 1), True), ((1, 2), False), (("1", "1"), True), (("1", "2"), False), (([1], {"a": 1}), False), (({"a": 1}, {"a": 1}), True), (({"a": 1}, {"b": 1}), False), (([1, 2, 3], [1, 2, 3]), True), (([1, 2, 3], [1, 2]), False), (([1, 2], [1, 2, 3]), False), (([1, 2, 3], [1, 2]), False), (([1, 2], [1, 2, 3]), False), ((["hello", "goodbye"], ["hi", "goodbye"]), False), ], ) def test_is_equal(case, expected): assert _.is_equal(*case) == expected assert _.is_equal_cmp(*case[1:])(case[0]) == expected @parametrize( "case,expected", [ ((1, 1, None), True), ((1, 2, None), False), (("1", "1", None), True), (("1", "2", None), False), (([1], {"a": 1}, None), False), (([1], {"a": 1}, lambda a, b: True), True), (({"a": 1}, {"a": 1}, None), True), (({"a": 1}, {"b": 1}, lambda a, b: None if isinstance(a, dict) else True), False), (([1, 2, 3], [1, 2, 3], None), True), (([1, 2, 3], [1, 2], None), False), (([1, 2], [1, 2, 3], None), False), (([1, 2, 3], [1, 2], lambda a, b: None if isinstance(a, list) else True), False), (([1, 2], [1, 2, 3], lambda a, b: None if isinstance(a, list) else True), False), ((["hello", "goodbye"], ["hi", "goodbye"], helpers.is_equal_iteratee0), True), ], ) def test_is_equal_with(case, expected): assert _.is_equal_with(*case) == expected assert _.is_equal_with_cmp(*case[1:])(case[0]) == expected @parametrize("case,expected", [(Exception(), True), ({}, False), ([], False)]) def test_is_error(case, expected): assert _.is_error(case) == expected @parametrize( "case,expected", [ (2, True), (16, True), (1, False), (3.0, False), (3.5, False), (None, False), ], ) def test_is_even(case, expected): assert _.is_even(case) == expected @parametrize( "case,expected", [ (1.0, True), (3.245, True), (1, False), (True, False), ("", False), ], ) def test_is_float(case, expected): assert _.is_float(case) == expected @parametrize( "case,expected", [ (lambda x: x + 1, True), ("Hello, world!", False), ], ) def test_is_function(case, expected): assert _.is_function(case) == expected @parametrize( "case,expected", [ ([1, 2, 3], True), ([5, 5, 6], True), (0, True), ([1, 2, 3, 4, 4, 5], True), ([1, 2, 3, 4, 4, 3], False), ], ) def test_is_increasing(case, expected): assert _.is_increasing(case) == expected @parametrize( "case,expected", [ ([], True), ("", True), ((), True), ({}, False), (1, False), (None, False), ], ) def test_is_indexed(case, expected): assert _.is_indexed(case) == expected @parametrize( "case,expected", [ ((1, int), True), ((1.0, float), True), (([], (list, str)), True), (([], dict), False), ((True, float), False), ], ) def test_is_instance_of(case, expected): assert _.is_instance_of(*case) == expected assert _.is_instance_of_cmp(case[1])(case[0]) == expected @parametrize( "case,expected", [ (1, True), (2, True), (1.0, False), (True, False), (None, False), ([], False), ], ) def test_is_integer(case, expected): assert _.is_integer(case) == expected @parametrize( "case,expected", [ ([], True), ({}, True), ((), True), ("a", True), (5, False), (None, False), ], ) def test_is_iterable(case, expected): assert _.is_iterable(case) == expected @parametrize( "case,expected", [ ('{"one": 1, "two": {"three": "3"}, "four": [4]}', True), 
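# is_json accepts JSON-decodable strings only: the already-parsed dict below
# is rejected even though it serializes to the string above.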
({"one": 1, "two": {"three": "3"}, "four": [4]}, False), ("", False), (1, False), (True, False), ], ) def test_is_json(case, expected): assert _.is_json(case) == expected @parametrize( "case,expected", [ ([1, 2, 3], True), ({}, False), ], ) def test_is_list(case, expected): assert _.is_list(case) == expected @parametrize( "case,expected", [ (({"name": "fred", "age": 40}, {"age": 40}), True), (({"name": "fred", "age": 40}, {"age": 40, "active": True}), False), (([1, 2, 3], [1, 2]), True), (([1, 2, 3], [1, 2, 3, 4]), False), (({}, {}), True), (({"a": 1}, {}), True), (([], []), True), (([1], []), True), ], ) def test_is_match(case, expected): assert _.is_match(*case) == expected assert _.is_match_cmp(case[1])(case[0]) == expected @parametrize( "case,expected", [ (([1, 2], [2, 4], lambda a, b: None if isinstance(a, list) else b == a + a), True), (([1, 2], [2, 4], lambda a, b, key: a == b == key), False), (([0, 1], [0, 1], lambda a, b, key: a == b == key), True), ], ) def test_is_match_with(case, expected): assert _.is_match_with(*case) == expected assert _.is_match_with_cmp(*case[1:])(case[0]) == expected @parametrize( "case,expected", [ (([1, 2, 3], operator.le), True), (([3, 2, 1], operator.ge), True), (([1, 1, 2], operator.lt), False), (([3, 3, 2], operator.gt), False), ], ) def test_is_monotone(case, expected): assert _.is_monotone(*case) == expected assert _.is_monotone_cmp(case[1])(case[0]) == expected @parametrize( "case,expected", [ (0, False), (123456789123456789123456789, False), (123.45, False), (decimal.Decimal(1), False), ("1", True), ], ) def test_is_nan(case, expected): assert _.is_nan(case) == expected @parametrize( "case,expected", [ (-1, True), (-1.25, True), (-0.1, True), (0, False), (1, False), (True, False), (False, False), ], ) def test_is_negative(case, expected): assert _.is_negative(case) == expected @parametrize( "case,expected", [ (None, True), (0, False), ], ) def test_is_none(case, expected): assert _.is_none(case) == expected @parametrize( "case,expected", [ (0, True), (123456789123456789123456789, True), (123.45, True), (decimal.Decimal(1), True), ("1", False), (True, False), (False, False), ], ) def test_is_number(case, expected): assert _.is_number(case) == expected @parametrize( "case,expected", [({}, True), ([], True), (1, False), ("a", False), (iter([]), False), (iter({}), False)], ) def test_is_object(case, expected): assert _.is_object(case) == expected @parametrize( "case,expected", [ (1, True), (3.0, True), (3.5, True), (2, False), (16, False), (None, False), ], ) def test_is_odd(case, expected): assert _.is_odd(case) == expected @parametrize( "case,expected", [(1, True), (1.25, True), (0.1, True), (-1, False), (0, False), (True, False), (False, False)], ) def test_is_positive(case, expected): assert _.is_positive(case) == expected @parametrize( "case,expected", [ (re.compile(""), True), ("", False), ("Hello, world!", False), (1, False), ({}, False), ([], False), (None, False), ], ) def test_is_reg_exp(case, expected): assert _.is_reg_exp(case) == expected @parametrize("case,expected", [(set(), True), ([1, 2, 3], False)]) def test_is_set(case, expected): assert _.is_set(case) == expected @parametrize( "case,expected", [ ([1, 2, 3], False), ([3, 2, 1], True), ([1, 1, 2], False), ([3, 3, 2], False), ], ) def test_is_strictly_decreasing(case, expected): assert _.is_strictly_decreasing(case) == expected @parametrize( "case,expected", [ ([1, 2, 3], True), ([3, 2, 1], False), ([1, 1, 2], False), ([3, 3, 2], False), ], ) def test_is_strictly_increasing(case, 
expected): assert _.is_strictly_increasing(case) == expected @parametrize( "case,expected", [("", True), ("Hello, world!", True), (1, False), ({}, False), ([], False), (None, False)], ) def test_is_string(case, expected): assert _.is_string(case) == expected @parametrize( "case,expected", [ ((), True), ([], False), ({}, False), ], ) def test_is_tuple(case, expected): assert _.is_tuple(case) == expected @parametrize( "case,expected", [ (0, True), (0.0, False), ("", False), (True, False), (False, False), ], ) def test_is_zero(case, expected): assert _.is_zero(case) == expected pydash-8.0.3/tests/test_strings.py000066400000000000000000001011501464745015500172670ustar00rootroot00000000000000import re from urllib.parse import parse_qsl, urlsplit import pytest import pydash as _ parametrize = pytest.mark.parametrize @parametrize( "case,expected", [ ("foo bar baz", "fooBarBaz"), ("foo bar baz", "fooBarBaz"), ("foo__bar_baz", "fooBarBaz"), ("foo-_bar-_-baz", "fooBarBaz"), ("foo!bar,baz", "fooBarBaz"), ("--foo.bar;baz", "fooBarBaz"), ("Foo!#Bar's", "fooBars"), ("你好,世界", "你好世界"), ("你好 世界", "你好世界"), ("你好(世界)", "你好世界"), ("你好(世界)", "你好(世界)"), ("你好,世界", "你好,世界"), ("Σὲ γνωρίζω ἀπὸ τὴν κόψη", "σὲΓνωρίζωἈπὸΤὴνΚόψη"), ("แผ่นดินฮั่นเสื่อมโทรมแสนสังเวช", "แผ่นดินฮั่นเสื่อมโทรมแสนสังเวช"), ("いろはにほへど ちりぬるを", "いろはにほへどちりぬるを"), ( "나는 유리를 먹을 수 있어요. 그래도 아프지 않아요", "나는유리를먹을수있어요그래도아프지않아요", ), ("J'peux manger d'la vitre, ça m'fa pas mal.", "jpeuxMangerDlaVitreCaMfaPasMal"), ("e peux manger du verre, ça ne me fait pas mal.", "ePeuxMangerDuVerreCaNeMeFaitPasMal"), (8, "8"), ("", ""), (None, ""), ], ) def test_camel_case(case, expected): assert _.camel_case(case) == expected @parametrize( "case,expected", [ (("foo",), "Foo"), (("foo bar",), "Foo bar"), (("fOO bar",), "Foo bar"), (("foo Bar",), "Foo bar"), (("foo", False), "Foo"), (("foo bar", False), "Foo bar"), (("fOO bar", False), "FOO bar"), (("foo Bar", False), "Foo Bar"), ((8,), "8"), ((" ",), " "), (("",), ""), ((None,), ""), ], ) def test_capitalize(case, expected): assert _.capitalize(*case) == expected @parametrize( "case,expected", [ (("foobarbaz", 3), ["foo", "bar", "baz"]), (("foobarbazaa", 3), ["foo", "bar", "baz", "aa"]), (("foo", 4), ["foo"]), (("foo", 0), ["foo"]), (("", 3), []), (("foo", -2), ["foo"]), ((None, 0), []), ((None, 1), []), ((None, -1), []), ], ) def test_chop(case, expected): assert _.chop(*case) == expected @parametrize( "case,expected", [ (("foobarbaz", 3), ["foo", "bar", "baz"]), (("foobarbazaa", 3), ["fo", "oba", "rba", "zaa"]), (("foo", 4), ["foo"]), (("foo", 0), ["foo"]), (("", 3), []), (("foo", -2), ["foo"]), ((None, 0), []), ((None, 1), []), ((None, -1), []), ], ) def test_chop_right(case, expected): assert _.chop_right(*case) == expected @parametrize( "case,expected", [ (" foo bar", "foo bar"), (" foo bar", "foo bar"), (" foo bar ", "foo bar"), ("", ""), (None, ""), ], ) def test_clean(case, expected): assert _.clean(case) == expected @parametrize( "case,expected", [ ("foobar", ["f", "o", "o", "b", "a", "r"]), ("", []), (5, ["5"]), (-5.6, ["-", "5", ".", "6"]), (None, []), ], ) def test_chars(case, expected): assert _.chars(case) == expected @parametrize( "case,expected", [ (("foobar", "o"), 2), (("foobar", "oo"), 1), (("foobar", "ooo"), 0), (("", "ooo"), 0), (("", None), 0), ((None, "ooo"), 0), ((None, None), 0), ((5, None), 0), ((5, 5), 1), ((5.5, 5), 2), ((5.5, "5."), 1), ((65.5, "5."), 1), ((654.5, "5."), 0), (("", ""), 1), (("1", ""), 2), ((1.4, ""), 4), ], ) def test_count_substr(case, expected): assert _.count_substr(*case) == 
expected @parametrize( "case,expected", [ ( "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef" "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", "AAAAAAAeCEEEEIIII" "DNOOOOO OUUUUYThss" "aaaaaaaeceeeeiiii" "dnooooo ouuuuythy", ), ("abcABC", "abcABC"), ("", ""), (None, ""), ], ) def test_deburr(case, expected): assert _.deburr(case) == expected @parametrize( "case,expected", [ ("Foo", "foo"), ("Foo bar", "foo bar"), ("Foo Bar", "foo Bar"), ("FOO BAR", "fOO BAR"), (None, ""), ], ) def test_decapitalize(case, expected): assert _.decapitalize(case) == expected @parametrize( "case,expected", [ (("abc", "c"), True), (("abc", "b"), False), (("abc", None), True), (("", "b"), False), (("", None), True), ((None, "b"), False), ((None, None), True), ((6.34, 4), True), ((6.34, 3), False), (("abc", "c", 3), True), (("abc", "c", 2), False), (("abc", "b", 2), True), (("abc", "b", 1), False), ((6.34, "b", 1), False), ], ) def test_ends_with(case, expected): assert _.ends_with(*case) == expected @parametrize( "case,expected", [ ("abc<> &\"'`efg", "abc<> &"'`efg"), ("abc", "abc"), ("", ""), (None, ""), ], ) def test_escape(case, expected): assert _.escape(case) == expected @parametrize( "case,expected", [ ("abc", "abc"), ("", ""), (None, ""), ( "[pydash](http://pydash.readthedocs.org/)", "\\[pydash\\]\\(http://pydash\\.readthedocs\\.org/\\)", ), ], ) def test_escape_reg_exp(case, expected): assert _.escape_reg_exp(case) == expected @parametrize( "text,prefix,expected", [ ("Hello world!", "Hello", "Hello world!"), (" world!", "Hello", "Hello world!"), ("", "Hello", "Hello"), ("", "", ""), ("1", "", "1"), ("1", "1", "1"), (5, 6, "65"), (None, 6, "6"), (None, None, ""), ], ) def test_ensure_starts_with(text, prefix, expected): assert _.ensure_starts_with(text, prefix) == expected @parametrize( "text,suffix,expected", [ ("Hello world!", "world!", "Hello world!"), ("Hello ", "world!", "Hello world!"), ("", "Hello", "Hello"), ("", "", ""), ("1", "", "1"), ("1", "1", "1"), (5, 6, "56"), (None, 6, "6"), (None, None, ""), ], ) def test_ensure_ends_with(text, suffix, expected): assert _.ensure_ends_with(text, suffix) == expected @parametrize( "case,expected", [ (("foobar", "oo"), True), (("foobar", "x"), False), (("foobar", "f"), True), (("foobar", "r"), True), (("foobar", ""), True), (("foobar", None), True), (("", ""), True), (("", None), True), ((None, None), True), ((56, 6), True), ((56, 7), False), ((5.6, "."), True), ], ) def test_has_substr(case, expected): assert _.has_substr(*case) == expected @parametrize( "case,expected", [ ( " capitalize dash-CamelCase_underscore trim_id ", "Capitalize dash camel case underscore trim", ), ("foo_bar_id", "Foo bar"), ("FooBar", "Foo bar"), (5, "5"), ("", ""), (None, ""), ], ) def test_human_case(case, expected): assert _.human_case(case) == expected @parametrize( "case,expected", [ (("foobar", 0, "xx"), "xxfoobar"), (("foobar", 1, "xx"), "fxxoobar"), (("foobar", 4, "xx"), "foobxxar"), (("foobar", 6, "xx"), "foobarxx"), (("foobar", 7, "xx"), "foobarxx"), (("f", 7, "xx"), "fxx"), (("", 7, "xx"), "xx"), (("", 7, ""), ""), (("", 7, None), ""), ((None, 7, None), ""), ((None, 0, None), ""), ], ) def test_insert_substr(case, expected): assert _.insert_substr(*case) == expected @parametrize( "case,expected", [ (((1, 2, 3), "."), "1.2.3"), ((("one", "two", "three"), "-.-"), "one-.-two-.-three"), ((("s", "t", "r", "i", 
"n", "g"), ""), "string"), ((("s", "t", "r", "i", "n", "g"),), "string"), ((("s", "t", "r", "i", "n", "g"), None), "string"), ((("string1", "string2"), ","), "string1,string2"), ((("string1", "string2"), 5.6), "string15.6string2"), (((None, "string2"), 5.6), "5.6string2"), (((7,), 5.6), "7"), (((None,), 5.6), ""), (((None, None), 5.6), "5.6"), (((None, None, None), 5.6), "5.65.6"), (((None, None, None), None), ""), ((None, None), ""), ], ) def test_join(case, expected): assert _.join(*case) == expected @parametrize( "case,expected", [ ("foo bar baz", "foo-bar-baz"), ("foo__bar_baz", "foo-bar-baz"), ("foo-_bar-_-baz", "foo-bar-baz"), ("foo!bar,baz", "foo-bar-baz"), ("--foo.bar;baz", "foo-bar-baz"), ("Foo Bar", "foo-bar"), ("fooBar", "foo-bar"), ("你好,世界", "你好-世界"), ("你好(世界)", "你好-世界"), ("你好(世界)", "你好(世界)"), ("Σὲ γνωρίζω ἀπὸ τὴν κόψη", "σὲ-γνωρίζω-ἀπὸ-τὴν-κόψη"), ("แผ่นดินฮั่นเสื่อมโทรมแสนสังเวช", "แผ่นดินฮั่นเสื่อมโทรมแสนสังเวช"), ("いろはにほへど ちりぬるを", "いろはにほへど-ちりぬるを"), ( "나는 유리를 먹을 수 있어요. 그래도 아프지 않아요", "나는-유리를-먹을-수-있어요-그래도-아프지-않아요", ), ("J'peux manger d'la vitre, ça m'fa pas mal.", "jpeux-manger-dla-vitre-ca-mfa-pas-mal"), ( "e peux manger du verre, ça ne me fait pas mal.", "e-peux-manger-du-verre-ca-ne-me-fait-pas-mal", ), (None, ""), (5, "5"), (5.6, "5-6"), (-5.6, "5-6"), ], ) def test_kebab_case(case, expected): assert _.kebab_case(case) == expected @parametrize( "case,expected", [ ("foo\nbar", ["foo", "bar"]), ("foo\rbar", ["foo", "bar"]), ("foo\r\nbar", ["foo", "bar"]), ("foo\n", ["foo"]), ("\nfoo", ["", "foo"]), ("", []), (None, []), ], ) def test_lines(case, expected): assert _.lines(case) == expected @parametrize( "case,expected", [ # noqa ("fooBar", "foo bar"), ("--foo-Bar--", "foo bar"), ("*Foo*B_a*R", "foo b a r"), ("*Foo10_B*Ar", "foo 10 b ar"), ('/?*Foo10/;"B*Ar', "foo 10 b ar"), ('/?F@O__o10456?>.B?>";Ar', "f o o 10456 b ar"), ("FoO Bar", "fo o bar"), ("F\n\\soO Bar", "f so o bar"), ("Foo1054665Bar", "foo 1054665 bar"), ], ) def test_lower_case(case, expected): assert _.lower_case(case) == expected @parametrize( "case,expected", [("Foobar", "foobar"), ("Foo Bar", "foo Bar"), ("1foobar", "1foobar"), (";foobar", ";foobar")], ) def test_lower_first(case, expected): assert _.lower_first(case) == expected @parametrize( "case,expected", [ ((1234,), "1,234"), ((1234567890,), "1,234,567,890"), ((1234, 2), "1,234.00"), ((1234, 1), "1,234.0"), ((1234, 2, ",", "."), "1.234,00"), (("1234",), ""), ], ) def test_number_format(case, expected): assert _.number_format(*case) == expected @parametrize( "case,expected", [ (("abc", 5, "12"), "1abc1"), (("abc", 8), " abc "), (("abc", 8, "_-"), "_-abc_-_"), (("abc", 3), "abc"), (("", 3), " "), ((" ", 3), " "), ((None, 3), " "), ], ) def test_pad(case, expected): assert _.pad(*case) == expected @parametrize( "case,expected", [ (("aaaaa", 3), "aaaaa"), (("aaaaa", 6), " aaaaa"), (("aaaaa", 10), " aaaaa"), (("aaaaa", 6, "b"), "baaaaa"), (("aaaaa", 6, "bc"), "caaaaa"), (("aaaaa", 9, "bc"), "bcbcaaaaa"), (("a", 9, "12"), "12121212a"), (("a", 8, "12"), "2121212a"), (("", 8, "12"), "12121212"), ((None, 8, "12"), "12121212"), ], ) def test_pad_start(case, expected): assert _.pad_start(*case) == expected @parametrize( "case,expected", [ (("aaaaa", 3), "aaaaa"), (("aaaaa", 6), "aaaaa "), (("aaaaa", 10), "aaaaa "), (("aaaaa", 6, "b"), "aaaaab"), (("aaaaa", 6, "bc"), "aaaaab"), (("aaaaa", 9, "bc"), "aaaaabcbc"), (("a", 9, "12"), "a12121212"), (("a", 8, "12"), "a1212121"), (("", 8, "12"), "12121212"), ((None, 8, "12"), "12121212"), ], ) def test_pad_end(case, 
expected): assert _.pad_end(*case) == expected @parametrize( "case,expected", [ ("foo bar baz", "FooBarBaz"), ("foo bar baz", "FooBarBaz"), ("foo__bar_baz", "FooBarBaz"), ("foo-_bar-_-baz", "FooBarBaz"), ("foo!bar,baz", "FooBarBaz"), ("--foo.bar;baz", "FooBarBaz"), ("", ""), (None, ""), ], ) def test_pascal_case(case, expected): assert _.pascal_case(case) == expected @parametrize( "case,expected", [ ("b", "a"), ("B", "A"), ], ) def test_predecessor(case, expected): assert _.predecessor(case) == expected @parametrize( "case,expected", [ (("Hello, world",), "..."), (("Hello, world", 5), "Hello..."), (("Hello, world", 8), "Hello..."), (("Hello, world", 5, " (read a lot more)"), "Hello, world"), (("Hello, cruel world", 15), "Hello, cruel..."), (("Hello", 10), "Hello"), (("", 10), ""), (("",), ""), ((None,), ""), ], ) def test_prune(case, expected): assert _.prune(*case) == expected @parametrize( "case,expected", [ (("hello world!",), '"hello world!"'), (("",), '""'), (("", None), ""), ((None,), '""'), ((None, None), ""), ((5,), '"5"'), ((5, None), "5"), ((-89,), '"-89"'), (("hello world!", "*"), "*hello world!*"), (("hello world!", "**"), "**hello world!**"), (("", "**"), "****"), (("hello world!", ""), "hello world!"), ((28, "**"), "**28**"), ((28, ""), "28"), ((2, 8), "828"), ((-2, 8), "8-28"), ((-2, -8), "-8-2-8"), ((-8.5, 0), "0-8.50"), ], ) def test_quote(case, expected): assert _.quote(*case) == expected @parametrize( "case,expected", [ (("Hello World", "/[A-Z]/"), ["H"]), (("Hello World", "/[A-Z]/g"), ["H", "W"]), (("hello world", "/[A-Z]/i"), ["h"]), (("hello world", "/[A-Z]/gi"), ["h", "e", "l", "l", "o", "w", "o", "r", "l", "d"]), (("12345", "/[A-Z]/"), []), ], ) def test_reg_exp_js_match(case, expected): assert _.reg_exp_js_match(*case) == expected @parametrize( "case,expected", [ (("Hello World", "/[A-Z]/", "!"), "!ello World"), (("Hello World", "/[A-Z]/g", "!"), "!ello !orld"), (("hello world", "/[A-Z]/i", "!"), "!ello world"), (("hello world", "/[A-Z]/gi", "!"), "!!!!! 
!!!!!"), (("hello world", "/[A-Z]/gi", ""), " "), (("hello world", "/[A-Z]/gi", None), " "), ], ) def test_reg_exp_js_replace(case, expected): assert _.reg_exp_js_replace(*case) == expected @parametrize( "case,expected", [ (("foo", "o", "a"), "faa"), (("foo", "o", "a", False, 1), "fao"), (("fOO", "o", "a"), "fOO"), (("fOO", "o", "a", True), "faa"), (("", "", ""), ""), (("foo", "o", ""), "f"), (("foo", "x", "y"), "foo"), (("foo", "^o", "a"), "foo"), (("ooo", "^o", "f"), "foo"), (("foo", "o$", "a"), "foa"), (("foo", "", "a"), "afaoaoa"), (("foo", "", ""), "foo"), (("foo", "", None), "foo"), (("foo", None, None), "foo"), (("foo", None, ""), "foo"), (("foo", None, "a"), "foo"), ((54.7, 5, 6), "64.7"), ], ) def test_reg_exp_replace(case, expected): assert _.reg_exp_replace(*case) == expected @parametrize( "case,expected", [ (("foo",), ""), (("foo", 0), ""), (("foo", 1), "foo"), (("foo", 3), "foofoofoo"), (("", 3), ""), (("", 0), ""), ((None, 0), ""), ((None, 1), ""), ], ) def test_repeat(case, expected): assert _.repeat(*case) == expected @parametrize( "case,expected", [ (("foo", "o", "a"), "faa"), (("foo", re.compile("o"), "a"), "faa"), (("foo", "o", "a", False, 1), "fao"), (("fOO", "o", "a"), "fOO"), (("fOO", "o", "a", True), "faa"), (("", "", ""), ""), (("foo", "o", ""), "f"), (("foo", "x", "y"), "foo"), (("foo", "", ""), "foo"), (("foo", "", None), "foo"), (("foo", None, None), "foo"), (("foo", None, ""), "foo"), (("foo", None, "a"), "foo"), ((54.7, 5, 6), "64.7"), ], ) def test_replace(case, expected): assert _.replace(*case) == expected @parametrize( "case,expected", [ (("foo", "o", "a"), "foa"), (("foo", "f", "a"), "foo"), ], ) def test_replace_end(case, expected): assert _.replace_end(*case) == expected @parametrize( "case,expected", [ (("foo", "o", "a"), "foo"), (("foo", "f", "a"), "aoo"), ], ) def test_replace_start(case, expected): assert _.replace_start(*case) == expected @parametrize( "case,expected", [ (("foo bar baz", "-"), "foo-bar-baz"), (("foo__bar_baz", "-"), "foo-bar-baz"), (("foo-_bar-_-baz", "-"), "foo-bar-baz"), (("foo!bar,baz", "-"), "foo-bar-baz"), (("--foo.bar;baz", "-"), "foo-bar-baz"), (("Foo Bar", "-"), "foo-bar"), (("foo bar baz", "_"), "foo_bar_baz"), (("foo__bar_baz", "_"), "foo_bar_baz"), (("foo-_bar-_-baz", "_"), "foo_bar_baz"), (("foo!bar,baz", "_"), "foo_bar_baz"), (("--foo.bar;baz", "_"), "foo_bar_baz"), (("Foo Bar", "_"), "foo_bar"), (("", "_"), ""), ((None, "_"), ""), ], ) def test_separator_case(case, expected): assert _.separator_case(*case) == expected @parametrize( "case,expected", [ (([],), ""), ((tuple(),), ""), (((None,),), ""), (((None, None),), ""), ((("", None),), ""), (((None, ""),), ""), (((None, 5),), "5"), (((7.88, None),), "7.88"), ((["", ""],), ""), ((["foo"],), "foo"), ((["foo", "bar"],), "foo and bar"), ((["foo", "bar", "baz"],), "foo, bar and baz"), ((["foo", "bar", "baz", "qux"], ", ", " or "), "foo, bar, baz or qux"), ((["foo", "bar", "baz", "qux"], ";", " or "), "foo;bar;baz or qux"), ((["foo", "bar", "baz", "qux"], 0.6, " or "), "foo0.6bar0.6baz or qux"), ((["foo", "bar", "baz", "qux"], 0.6, 1), "foo0.6bar0.6baz1qux"), ((["foo", "bar", "baz", "qux"], 0.6, None), "foo0.6bar0.6bazqux"), ((["foo", 23, "baz", "qux"], None, None), "foo23bazqux"), ], ) def test_series_phrase(case, expected): assert _.series_phrase(*case) == expected @parametrize( "case,expected", [ (([],), ""), ((tuple(),), ""), (((None,),), ""), (((None, None),), ""), ((("", None),), ""), (((None, ""),), ""), (((None, 5),), "5"), (((7.88, None),), "7.88"), ((["", ""],), 
""), ((["foo"],), "foo"), ((["foo", "bar"],), "foo and bar"), ((["foo", "bar", "baz"],), "foo, bar, and baz"), ((["foo", "bar", "baz", "qux"], ", ", " or "), "foo, bar, baz, or qux"), ((["foo", "bar", "baz", "qux"], ";", " or "), "foo;bar;baz; or qux"), ], ) def test_series_phrase_serial(case, expected): assert _.series_phrase_serial(*case) == expected @parametrize( "case,expected", [ ("Foo Bar", "foo-bar"), (" foo bar ", "foo-bar"), ("Un éléphant à l'orée du bois", "un-elephant-a-loree-du-bois"), ("shouldn't couldn't wouldn't", "shouldnt-couldnt-wouldnt"), ("", ""), (5, "5"), (None, ""), ], ) def test_slugify(case, expected): assert _.slugify(case) == expected @parametrize( "case,expected", [ ("foo bar baz", "foo_bar_baz"), ("foo__bar_baz", "foo_bar_baz"), ("foo-_bar-_-baz", "foo_bar_baz"), ("foo!bar,baz", "foo_bar_baz"), ("--foo.bar;baz", "foo_bar_baz"), ("FooBar", "foo_bar"), ("fooBar", "foo_bar"), ("你好,世界", "你好_世界"), ("你好(世界)", "你好_世界"), ("你好(世界)", "你好(世界)"), ("你好,世界", "你好,世界"), ("Σὲ γνωρίζω ἀπὸ τὴν κόψη", "σὲ_γνωρίζω_ἀπὸ_τὴν_κόψη"), ("แผ่นดินฮั่นเสื่อมโทรมแสนสังเวช", "แผ่นดินฮั่นเสื่อมโทรมแสนสังเวช"), ("いろはにほへど ちりぬるを", "いろはにほへど_ちりぬるを"), ( "나는 유리를 먹을 수 있어요. 그래도 아프지 않아요", "나는_유리를_먹을_수_있어요_그래도_아프지_않아요", ), ("J'peux manger d'la vitre, ça m'fa pas mal.", "jpeux_manger_dla_vitre_ca_mfa_pas_mal"), ( "e peux manger du verre, ça ne me fait pas mal.", "e_peux_manger_du_verre_ca_ne_me_fait_pas_mal", ), ("", ""), (None, ""), (5, "5"), ], ) def test_snake_case(case, expected): assert _.snake_case(case) == expected @parametrize( "case,expected", [ (("string1 string2",), ["string1", "string2"]), (("string", ""), ["s", "t", "r", "i", "n", "g"]), (("string", None), ["s", "t", "r", "i", "n", "g"]), (("", ""), []), (("", None), []), ((None, None), []), (("string1,string2", ","), ["string1", "string2"]), ], ) def test_split(case, expected): assert _.split(*case) == expected @parametrize( "case,expected", [ ("foo bar baz", "Foo Bar Baz"), ("foo-bar-baz", "Foo Bar Baz"), ("Foo!#Bar's", "Foo Bars"), ("fooBar", "Foo Bar"), ("___FOO-BAR___", "FOO BAR"), ("XMLHttp", "XML Http"), ("", ""), (None, ""), (5, "5"), ], ) def test_start_case(case, expected): assert _.start_case(case) == expected @parametrize( "case,expected", [ (("abc", "a"), True), (("abc", "b"), False), (("abc", "a", 0), True), (("abc", "a", 1), False), (("abc", "b", 1), True), (("abc", "b", 2), False), ((5.78, 5), True), (("5.78", 5), True), ((5.78, "5"), True), ((5.78, ""), True), ((5.78, None), True), ((None, None), True), (("", None), True), ((" ", None), True), ], ) def test_starts_with(case, expected): assert _.starts_with(*case) == expected @parametrize( "case,expected", [ ('a link', "a link"), ( 'a link', 'a linkalert("hello world!")', ), ("", ""), (None, ""), ], ) def test_strip_tags(case, expected): assert _.strip_tags(case) == expected @parametrize( "case,expected", [ (("This_is_a_test_string", "_"), "This"), (("This_is_a_test_string", ""), "This_is_a_test_string"), (("This_is_a_test_string", " "), "This_is_a_test_string"), (("This_is_a_test_string", None), "This_is_a_test_string"), ((None, None), ""), ((None, "4"), ""), ((None, ""), ""), ], ) def test_substr_left(case, expected): assert _.substr_left(*case) == expected @parametrize( "case,expected", [ (("This_is_a_test_string", "_"), "This_is_a_test"), (("This_is_a_test_string", ""), "This_is_a_test_string"), (("This_is_a_test_string", " "), "This_is_a_test_string"), (("This_is_a_test_string", None), "This_is_a_test_string"), ((None, None), ""), ((None, "4"), ""), ((None, ""), ""), ], ) 
def test_substr_left_end(case, expected): assert _.substr_left_end(*case) == expected @parametrize( "case,expected", [ (("This_is_a_test_string", "_"), "is_a_test_string"), (("This_is_a_test_string", ""), "This_is_a_test_string"), (("This_is_a_test_string", " "), "This_is_a_test_string"), (("This_is_a_test_string", None), "This_is_a_test_string"), ((None, None), ""), ((None, "4"), ""), ((None, ""), ""), ], ) def test_substr_right(case, expected): assert _.substr_right(*case) == expected @parametrize( "case,expected", [ (("This_is_a_test_string", "_"), "string"), (("This_is_a_test_string", ""), "This_is_a_test_string"), (("This_is_a_test_string", " "), "This_is_a_test_string"), (("This_is_a_test_string", None), "This_is_a_test_string"), ((None, None), ""), ((None, "4"), ""), ((None, ""), ""), ], ) def test_substr_right_end(case, expected): assert _.substr_right_end(*case) == expected @parametrize( "case,expected", [ ("a", "b"), ("A", "B"), ], ) def test_successor(case, expected): assert _.successor(case) == expected @parametrize( "source,wrapper,expected", [ ("hello world!", "*", "*hello world!*"), ("hello world!", "**", "**hello world!**"), ("", "**", "****"), ("hello world!", "", "hello world!"), (5, "1", "151"), (5, 1, "151"), ("5", 1, "151"), ("5", 12, "12512"), (5, "", "5"), ("", 5, "55"), ("", None, ""), (None, None, ""), (5, None, "5"), ], ) def test_surround(source, wrapper, expected): assert _.surround(source, wrapper) == expected @parametrize( "case,expected", [ ("fOoBaR", "FoObAr"), ("", ""), (None, ""), ], ) def test_swap_case(case, expected): assert _.swap_case(case) == expected @parametrize( "case,expected", [ ("foo bar baz", "Foo Bar Baz"), ("they're bill's friends from the UK", "They're Bill's Friends From The Uk"), ("", ""), (None, ""), (5, "5"), ], ) def test_title_case(case, expected): assert _.title_case(case) == expected @parametrize( "case,expected", [("--Foo-Bar--", "--foo-bar--"), ("fooBar", "foobar"), ("__FOO_BAR__", "__foo_bar__")], ) def test_to_lower(case, expected): assert _.to_lower(case) == expected @parametrize( "case,expected", [("--Foo-Bar--", "--FOO-BAR--"), ("fooBar", "FOOBAR"), ("__FOO_BAR__", "__FOO_BAR__")], ) def test_to_upper(case, expected): assert _.to_upper(case) == expected @parametrize( "case,expected", [ ((" fred ",), "fred"), (("-_-fred-_-", "_-"), "fred"), (("-_-fred-_-", None), "-_-fred-_-"), ((None,), ""), ((None, None), ""), ], ) def test_trim(case, expected): assert _.trim(*case) == expected @parametrize( "case,expected", [ ((" fred ",), "fred "), (("-_-fred-_-", "_-"), "fred-_-"), (("-_-fred-_-", None), "-_-fred-_-"), ((None,), ""), ((None, None), ""), ], ) def test_trim_start(case, expected): assert _.trim_start(*case) == expected @parametrize( "case,expected", [ ((" fred ",), " fred"), (("-_-fred-_-", "_-"), "-_-fred"), (("-_-fred-_-", None), "-_-fred-_-"), ((None,), ""), ((None, None), ""), ], ) def test_trim_end(case, expected): assert _.trim_end(*case) == expected @parametrize( "case,expected", [ (("hi-diddly-ho there, neighborino",), "hi-diddly-ho there, neighbo..."), (("hi-diddly-ho there, neighborino", 24), "hi-diddly-ho there, n..."), (("hi-diddly-ho there, neighborino", 24, "...", " "), "hi-diddly-ho there,..."), ( ("hi-diddly-ho there, neighborino", 24, "...", re.compile(",? 
+")), "hi-diddly-ho there...", ), (("hi-diddly-ho there, neighborino", 30, " [...]"), "hi-diddly-ho there, neig [...]"), (("123456789", 9), "123456789"), (("123456789", 8), "12345..."), (("x",), "x"), ((" ",), " "), (("",), ""), ((None,), ""), ], ) def test_truncate(case, expected): assert _.truncate(*case) == expected @parametrize( "case,expected", [ ("abc<> &"'`efg", "abc<> &\"'`efg"), ("", ""), (" ", " "), (None, ""), ], ) def test_unescape(case, expected): assert _.unescape(case) == expected @parametrize( "case,expected", [ (('"foo"',), "foo"), (("'foo'", "'"), "foo"), (('"foo',), '"foo'), (('foo"',), 'foo"'), ((" ",), " "), (("",), ""), ((None,), ""), ], ) def test_unquote(case, expected): assert _.unquote(*case) == expected @parametrize( "case,expected", [ # noqa ("fooBar", "FOO BAR"), ("--foo-Bar--", "FOO BAR"), ("*Foo*B_a*R", "FOO B A R"), ("*Foo10_B*Ar", "FOO 10 B AR"), ('/?*Foo10/;"B*Ar', "FOO 10 B AR"), ('/?F@O__o10456?>.B?>";Ar', "F O O 10456 B AR"), ("FoO Bar", "FO O BAR"), ("F\n\\soO Bar", "F SO O BAR"), ("Foo1054665Bar", "FOO 1054665 BAR"), ], ) def test_upper_case(case, expected): assert _.upper_case(case) == expected @parametrize( "case,expected", [("foobar", "Foobar"), ("Foobar", "Foobar"), ("1foobar", "1foobar"), (";foobar", ";foobar")], ) def test_upper_first(case, expected): assert _.upper_first(case) == expected @parametrize( "case,expected", [ ({"args": [""], "kwargs": {}}, ""), ({"args": ["/"], "kwargs": {}}, "/"), ({"args": ["http://github.com"], "kwargs": {}}, "http://github.com"), ({"args": ["http://github.com:80"], "kwargs": {}}, "http://github.com:80"), ( {"args": ["http://github.com:80", "pydash", "issues/"], "kwargs": {}}, "http://github.com:80/pydash/issues/", ), ({"args": ["/foo", "bar", "/baz", "/qux/"], "kwargs": {}}, "/foo/bar/baz/qux/"), ({"args": ["/foo/bar"], "kwargs": {"a": 1, "b": "two"}}, "/foo/bar?a=1&b=two"), ({"args": ["/foo/bar?x=5"], "kwargs": {"a": 1, "b": "two"}}, "/foo/bar?x=5&a=1&b=two"), ( {"args": ["/foo/bar?x=5", "baz?z=3"], "kwargs": {"a": 1, "b": "two"}}, "/foo/bar/baz?x=5&a=1&b=two&z=3", ), ( {"args": ["/foo/bar?x=5", "baz?z=3"], "kwargs": {"a": [1, 2], "b": "two"}}, "/foo/bar/baz?x=5&a=1&a=2&b=two&z=3", ), ( {"args": ["/foo#bar", "baz"], "kwargs": {"a": [1, 2], "b": "two"}}, "/foo?a=1&a=2&b=two#bar/baz", ), ( {"args": ["/foo", "baz#bar"], "kwargs": {"a": [1, 2], "b": "two"}}, "/foo/baz?a=1&a=2&b=two#bar", ), ], ) def test_url(case, expected): result = _.url(*case["args"], **case["kwargs"]) r_scheme, r_netloc, r_path, r_query, r_fragment = urlsplit(result) e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected) assert r_scheme == e_scheme assert r_netloc == e_netloc assert r_path == e_path assert set(parse_qsl(r_query)) == set(parse_qsl(e_query)) assert r_fragment == e_fragment @parametrize( "case,expected", [ # noqa ("hello world!", ["hello", "world"]), # noqa ("hello_world", ["hello", "world"]), ("hello!@#$%^&*()_+{}|:\"<>?-=[]\\;\\,.'/world", ["hello", "world"]), ("hello 12345 world", ["hello", "12345", "world"]), ("enable 24h format", ["enable", "24", "h", "format"]), ("tooLegit2Quit", ["too", "Legit", "2", "Quit"]), ("xhr2Request", ["xhr", "2", "Request"]), (" ", []), ("", []), (None, []), ], ) def test_words(case, expected): assert _.words(case) == expected @parametrize( "case,expected", [ ("enable 24h format", "enable24HFormat"), ], ) def test_word_cycle(case, expected): actual = _.chain(case).camel_case().kebab_case().snake_case().start_case().camel_case().value() assert actual == expected 
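# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the archived test modules): a minimal,
# self-contained usage example of the string helpers exercised by the
# parametrized cases above. Expected values are taken directly from those
# cases; the only assumption is that pydash is importable.
# ---------------------------------------------------------------------------
import pydash as _

# Case-conversion helpers normalize separators before re-casing.
assert _.camel_case("foo bar baz") == "fooBarBaz"
assert _.kebab_case("fooBar") == "foo-bar"
assert _.snake_case("FooBar") == "foo_bar"

# truncate() with its defaults, as in the first truncate case above.
assert _.truncate("hi-diddly-ho there, neighborino") == "hi-diddly-ho there, neighbo..."

# The same helpers compose through chain(), as test_word_cycle shows.
assert (
    _.chain("enable 24h format")
    .camel_case()
    .kebab_case()
    .snake_case()
    .start_case()
    .camel_case()
    .value()
    == "enable24HFormat"
)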
pydash-8.0.3/tests/test_utilities.py000066400000000000000000000434551464745015500176260ustar00rootroot00000000000000import time from unittest import mock import pytest import pydash as _ parametrize = pytest.mark.parametrize @parametrize("case,expected", [((lambda a, b: a / b, 4, 2), 2)]) def test_attempt(case, expected): assert _.attempt(*case) == expected @parametrize("case,expected", [((lambda a, b: a / b, 4, 0), ZeroDivisionError)]) def test_attempt_exception(case, expected): assert isinstance(_.attempt(*case), expected) @parametrize( "pairs,case,expected", [ ( ( [_.matches({"b": 2}), _.constant("matches B")], [_.matches({"a": 1}), _.constant("matches A")], ), {"a": 1, "b": 2}, "matches B", ), ( ([_.matches({"b": 2}), _.constant("matches B")], [_.matches({"a": 1}), _.invert]), {"a": 1, "b": 3}, {1: "a", 3: "b"}, ), ( ( [_.matches({"a": 1}), _.constant("matches A")], [_.matches({"b": 2}), _.constant("matches B")], ), {"a": 1, "b": 2}, "matches A", ), ], ) def test_cond(pairs, case, expected): func = _.cond(*pairs) assert func(case) == expected @parametrize( "case,expected", [ ([_.matches({"b": 2})], ValueError), ([_.matches({"b": 2}), _.matches({"a": 1}), _.constant("matches B")], ValueError), ([1, 2], ValueError), ([[1, 2]], TypeError), ([_.matches({"b": 2}), 2], ValueError), ], ) def test_cond_exception(case, expected): with pytest.raises(expected): _.cond(case) @parametrize( "source,case,expected", [ ({"a": lambda n: n == 0, "b": lambda n: n < 0}, {"a": 0, "b": -1}, True), ({"a": lambda n: n == 0, "b": lambda n: n < 0}, {"a": 1, "b": -1}, False), ({"a": lambda n: n == 0, "b": lambda n: n < 0}, {"a": 0, "b": 1}, False), ({"b": lambda n: n > 1}, {"b": 2}, True), ({"b": lambda n: n > 1}, {"b": 0}, False), ([lambda n: n == 0, lambda n: n < 1], [0, -1], True), ([lambda n: n == 0, lambda n: n < 1], [1, -1], False), ([lambda n: n == 0, lambda n: n < 1], [0, 1], False), ], ) def test_conforms(source, case, expected): func = _.conforms(source) assert func(case) == expected @parametrize( "source,case,expected", [ ({"a": lambda n: n == 0, "b": lambda n: n < 0}, {"a": 0, "b": -1}, True), ({"a": lambda n: n == 0, "b": lambda n: n < 0}, {"a": 1, "b": -1}, False), ({"a": lambda n: n == 0, "b": lambda n: n < 0}, {"a": 0, "b": 1}, False), ({"b": lambda n: n > 1}, {"b": 2}, True), ({"b": lambda n: n > 1}, {"b": 0}, False), ([lambda n: n == 0, lambda n: n < 1], [0, -1], True), ([lambda n: n == 0, lambda n: n < 1], [1, -1], False), ([lambda n: n == 0, lambda n: n < 1], [0, 1], False), ], ) def test_conforms_to(source, case, expected): assert _.conforms_to(case, source) == expected @parametrize("case", ["foo", "bar"]) def test_constant(case): assert _.constant(case)() == case @parametrize( "case,expected", [ (([1, 10, 20]), 1), (([None, 10, 20]), 10), (([None, None, 20]), 20), (([None, [1, 2], [3, 4]]), [1, 2]), (([None, None, [3, 4]]), [3, 4]), (([None, None, None]), None), ], ) def test_default_to_any(case, expected): assert _.default_to_any(*case) == expected @parametrize( "case,expected", [(([1, 10]), 1), (([None, 10]), 10), (([[1, 2], [3, 4]]), [1, 2]), (([None, [3, 4]]), [3, 4])], ) def test_default_to(case, expected): assert _.default_to(*case) == expected @parametrize("case,expected", [((1,), 1), ((1, 2), 1), ((), None)]) def test_identity(case, expected): assert _.identity(*case) == expected @parametrize( "case,arg,expected", [ ("name", [{"name": "fred", "age": 40}, {"name": "barney", "age": 36}], ["fred", "barney"]), ( "friends.1", [ {"name": "fred", "age": 40, "friends": ["barney", "betty"]}, 
{"name": "barney", "age": 36, "friends": ["fred", "wilma"]}, ], ["betty", "wilma"], ), ( ["friends.1"], [ {"name": "fred", "age": 40, "friends.1": "betty"}, {"name": "barney", "age": 36, "friends.1": "wilma"}, ], ["betty", "wilma"], ), ( {"name": "fred"}, [{"name": "fred", "age": 40}, {"name": "barney", "age": 36}], [True, False], ), ( lambda obj: obj["age"], [{"name": "fred", "age": 40}, {"name": "barney", "age": 36}], [40, 36], ), ( None, [{"name": "fred", "age": 40}, {"name": "barney", "age": 36}], [{"name": "fred", "age": 40}, {"name": "barney", "age": 36}], ), ( ["name", "fred"], [{"name": "fred", "age": 40}, {"name": "barney", "age": 36}], [True, False], ), (1, [[0, 1], [2, 3], [4, 5]], [1, 3, 5]), (("a", "b.c", ["d", 0, "f"]), [{"a": 1, "b": {"c": 2}, "d": [{"f": 3}, 4]}], [[1, 2, 3]]), (("a",), [{"a": 1}], [[1]]), ], ) def test_iteratee(case, arg, expected): getter = _.iteratee(case) assert _.map_(arg, getter) == expected @parametrize( "case,arg,expected", [ ({"age": 36}, {"name": "barney", "age": 36}, True), ({"age": 36}, {"name": "barney", "age": 40}, False), ({"a": {"b": 2}}, {"a": {"b": 2, "c": 3}}, True), ], ) def test_matches(case, arg, expected): assert _.matches(case)(arg) is expected @parametrize( "case,arg,expected", [ (("a", 1), {"a": 1, "b": 2}, True), (("a", 2), {"a": 1, "b": 2}, False), ((1, 2), [1, 2, 3], True), ((1, 3), [1, 2, 3], False), ], ) def test_matches_property(case, arg, expected): assert _.matches_property(*case)(arg) is expected @parametrize( "case,args,kwargs,key", [ ((lambda a, b: a + b,), (1, 2), {}, "(1, 2){}"), ((lambda a, b: a + b,), (1,), {"b": 2}, "(1,){'b': 2}"), ((lambda a, b: a + b, lambda a, b: a * b), (1, 2), {}, 2), ((lambda a, b: a + b, lambda a, b: a * b), (1,), {"b": 2}, 2), ], ) def test_memoize(case, args, kwargs, key): memoized = _.memoize(*case) expected = case[0](*args, **kwargs) assert memoized(*args, **kwargs) == expected assert memoized.cache assert memoized.cache[key] == expected @parametrize( "case,args,kwargs,expected", [ (("a.b",), ({"a": {"b": lambda x, y: x + y}}, 1, 2), {}, 3), ( ("a.b",), ( {"a": {"b": lambda x, y: x + y}}, 1, ), {"y": 2}, 3, ), ( ("a.b", 1), ( {"a": {"b": lambda x, y: x + y}}, 2, ), {}, 3, ), ], ) def test_method(case, args, kwargs, expected): assert _.method(*case)(*args, **kwargs) == expected @parametrize( "case,args,kwargs,expected", [ (({"a": {"b": lambda x, y: x + y}},), ("a.b", 1, 2), {}, 3), ( ({"a": {"b": lambda x, y: x + y}},), ( "a.b", 1, ), {"y": 2}, 3, ), ], ) def test_method_of(case, args, kwargs, expected): assert _.method_of(*case)(*args, **kwargs) == expected @parametrize("case,expected", [((), None), ((1, 2, 3), None)]) def test_noop(case, expected): assert _.noop(*case) == expected @parametrize( "args,pos,expected", [ ([11, 22, 33, 44], 0, 11), ([11, 22, 33, 44], -1, 44), ([11, 22, 33, 44], -4, 11), ([11, 22, 33, 44], -5, None), ([11, 22, 33, 44], 4, None), ([11, 22, 33], "1", 22), ([11, 22, 33], "xyz", 11), ([11, 22, 33], 1.45, 33), ([11, 22, 33], 1.51, 33), ], ) def test_nth_arg(args, pos, expected): func = _.nth_arg(pos) assert func(*args) == expected def test_now(): present = int(time.time() * 1000) # Add some leeway when comparing time. 
assert (present - 1) <= _.now() <= (present + 1) @parametrize("funcs,data,expected", [([max, min], [1, 2, 3, 4], [4, 1])]) def test_over(funcs, data, expected): assert _.over(funcs)(*data) == expected @parametrize( "funcs,data,expected", [([lambda x: x is not None, bool], [1], True), ([lambda x: x is None, bool], [1], False)], ) def test_over_every(funcs, data, expected): assert _.over_every(funcs)(*data) == expected @parametrize( "funcs,data,expected", [ ([lambda x: x is not None, bool], [1], True), ([lambda x: x is None, bool], [1], True), ([lambda x: x is False, lambda y: y == 2], [True], False), ], ) def test_over_some(funcs, data, expected): assert _.over_some(funcs)(*data) == expected @parametrize( "case,arg,expected", [ ("name", [{"name": "fred", "age": 40}, {"name": "barney", "age": 36}], ["fred", "barney"]), ("name", [{"name": "fred", "age": 40}, {"name": "barney", "age": 36}], ["fred", "barney"]), ( "spouse.name", [ {"name": "fred", "age": 40, "spouse": {"name": "wilma"}}, {"name": "barney", "age": 36, "spouse": {"name": "betty"}}, ], ["wilma", "betty"], ), ], ) def test_property_(case, arg, expected): assert _.map_(arg, _.property_(case)) == expected @parametrize( "case,arg,expected", [ ({"name": "fred", "age": 40}, ["name", "age"], ["fred", 40]), ], ) def test_property_of(case, arg, expected): assert _.map_(arg, _.property_of(case)) == expected @parametrize("case,minimum,maximum", [((), 0, 1), ((25,), 0, 25), ((5, 10), 5, 10)]) def test_random(case, minimum, maximum): for _x in range(50): rnd = _.random(*case) assert isinstance(rnd, int) assert minimum <= rnd <= maximum @parametrize( "case,floating,minimum,maximum", [ ((), True, 0, 1), ((25,), True, 0, 25), ((5, 10), True, 5, 10), ((5.0, 10), False, 5, 10), ((5, 10.0), False, 5, 10), ((5.0, 10.0), False, 5, 10), ((5.0, 10.0), True, 5, 10), ], ) def test_random_float(case, floating, minimum, maximum): for _x in range(50): rnd = _.random(*case, floating=floating) assert isinstance(rnd, float) assert minimum <= rnd <= maximum @parametrize( "case,expected", [ ((10,), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), ((1, 11), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), ((11, 1), [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]), ((11, 1, -2), [11, 9, 7, 5, 3]), ((11, 1, 2), []), ((0, 30, 5), [0, 5, 10, 15, 20, 25]), ((0, -10, -1), [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]), ((0,), []), ((), []), ], ) def test_range_(case, expected): assert list(_.range_(*case)) == expected @parametrize( "case,expected", [ ((4,), [3, 2, 1, 0]), ((-4,), [-3, -2, -1, 0]), ((1, 5), [4, 3, 2, 1]), ((0, 20, 5), [15, 10, 5, 0]), ((0, -20, -5), [-15, -10, -5, -0]), ((0, -4, -1), [-3, -2, -1, 0]), ((1, 10), [9, 8, 7, 6, 5, 4, 3, 2, 1]), ((-1, 10), [9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1]), ((-1, -10), [-9, -8, -7, -6, -5, -4, -3, -2, -1]), ((1, 10, 2), [9, 7, 5, 3, 1]), ((1, 10, -2), []), ((-1, 10, 2), [9, 7, 5, 3, 1, -1]), ((-1, 10, -2), []), ((-1, -10, 2), []), ((-1, -10, -2), [-9, -7, -5, -3, -1]), ((10, 1), [2, 3, 4, 5, 6, 7, 8, 9, 10]), ((-10, 1), [0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10]), ((-10, -1), [-2, -3, -4, -5, -6, -7, -8, -9, -10]), ((10, 1, 2), []), ((10, 1, -2), [2, 4, 6, 8, 10]), ((-10, 1, 2), [0, -2, -4, -6, -8, -10]), ((-10, 1, -2), []), ((-10, -1, 2), [-2, -4, -6, -8, -10]), ((-10, -1, -2), []), ((1, 4, 0), [1, 1, 1]), ((0,), []), ((10,), [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]), ((1, 5), [4, 3, 2, 1]), ((0, 20, 5), [15, 10, 5, 0]), ((0, -4, -1), [-3, -2, -1, 0]), ((0,), []), ((), []), ], ) def test_range_right(case, expected): assert list(_.range_right(*case)) == expected @parametrize( 
"case,expected", [ (({"cheese": "crumpets", "stuff": lambda: "nonsense"}, "cheese"), "crumpets"), (({"cheese": "crumpets", "stuff": lambda: "nonsense"}, "stuff"), "nonsense"), (({"cheese": "crumpets", "stuff": lambda: "nonsense"}, "foo"), None), ((False, "foo"), None), ((False, "foo", "default"), "default"), ], ) def test_result(case, expected): assert _.result(*case) == expected @parametrize( "case,delay_count,delay_times", [ ({}, 2, [0.5, 1.0]), ({"attempts": 1}, 0, []), ({"attempts": 3, "delay": 0.5, "scale": 2.0}, 2, [0.5, 1.0]), ({"attempts": 5, "delay": 1.5, "scale": 2.5}, 4, [1.5, 3.75, 9.375, 23.4375]), ({"attempts": 5, "delay": 1.5, "max_delay": 8.0, "scale": 2.5}, 4, [1.5, 3.75, 8.0, 8.0]), ], ) def test_retry(mock_sleep, case, delay_count, delay_times): @_.retry(**case) def func(): raise KeyError() with pytest.raises(KeyError): func() assert delay_count == mock_sleep.call_count delay_calls = [mock.call(time) for time in delay_times] assert delay_calls == mock_sleep.call_args_list @parametrize( "case,delay_count", [({"attempts": 3}, 0), ({"attempts": 3}, 1), ({"attempts": 3}, 2), ({"attempts": 5}, 3)], ) def test_retry_success(mock_sleep, case, delay_count): counter = {True: 0} @_.retry(**case) def func(): if counter[True] != delay_count: counter[True] += 1 raise Exception() return True result = func() assert result is True assert counter[True] == delay_count assert delay_count == mock_sleep.call_count @parametrize( "case,unexpected_delay_times", [ ({"jitter": 5, "delay": 2, "scale": 1, "attempts": 5}, [2, 2, 2, 2]), ({"jitter": 10, "delay": 3, "scale": 1.5, "attempts": 5}, [3, 4.5, 6.75, 10.125]), ({"jitter": 1.0, "delay": 3, "scale": 1.5, "attempts": 5}, [3, 4.5, 6.75, 10.125]), ], ) def test_retry_jitter(mock_sleep, case, unexpected_delay_times): @_.retry(**case) def func(): raise ValueError() with pytest.raises(ValueError): func() unexpected_delay_calls = [mock.call(time) for time in unexpected_delay_times] assert len(unexpected_delay_calls) == mock_sleep.call_count assert unexpected_delay_calls != mock_sleep.call_args_list @parametrize( "case,raise_exc,delay_count", [ ({"attempts": 1, "exceptions": (RuntimeError,)}, RuntimeError, 0), ({"attempts": 2, "exceptions": (RuntimeError,)}, RuntimeError, 1), ({"attempts": 2, "exceptions": (RuntimeError,)}, Exception, 0), ], ) def test_retry_exceptions(mock_sleep, case, raise_exc, delay_count): @_.retry(**case) def func(): raise raise_exc() with pytest.raises(raise_exc): func() assert delay_count == mock_sleep.call_count def test_retry_on_exception(mock_sleep): attempts = 5 error_count = {True: 0} def on_exception(exc): error_count[True] += 1 @_.retry(attempts=attempts, on_exception=on_exception) def func(): raise TypeError() with pytest.raises(TypeError): func() assert error_count[True] == attempts @parametrize( "case,exception", [ ({"attempts": 0}, ValueError), ({"attempts": "1"}, ValueError), ({"delay": -1}, ValueError), ({"delay": "1"}, ValueError), ({"max_delay": -1}, ValueError), ({"max_delay": "1"}, ValueError), ({"scale": 0}, ValueError), ({"scale": "1"}, ValueError), ({"jitter": -1}, ValueError), ({"jitter": "1"}, ValueError), ({"jitter": (1,)}, ValueError), ({"jitter": ("1", "2")}, ValueError), ({"exceptions": (1, 2)}, TypeError), ({"exceptions": 1}, TypeError), ({"exceptions": (Exception, 2)}, TypeError), ({"on_exception": 5}, TypeError), ], ) def test_retry_invalid_args(case, exception): with pytest.raises(exception): _.retry(**case) @parametrize("case,expected", [(_.times(2, _.stub_list), [[], []]), (_.stub_list(), [])]) 
def test_stub_list(case, expected):
    assert case == expected


@parametrize("case,expected", [(_.times(2, _.stub_dict), [{}, {}]), (_.stub_dict(), {})])
def test_stub_dict(case, expected):
    assert case == expected


@parametrize("case,expected", [(_.times(2, _.stub_false), [False, False]), (_.stub_false(), False)])
def test_stub_false(case, expected):
    assert case == expected


@parametrize("case,expected", [(_.times(2, _.stub_string), ["", ""]), (_.stub_string(), "")])
def test_stub_string(case, expected):
    assert case == expected


@parametrize("case,expected", [(_.times(2, _.stub_true), [True, True]), (_.stub_true(), True)])
def test_stub_true(case, expected):
    assert case == expected


@parametrize("case,expected", [((5, lambda i: i * i), [0, 1, 4, 9, 16]), ((5,), [0, 1, 2, 3, 4])])
def test_times(case, expected):
    assert _.times(*case) == expected


@parametrize(
    "case,expected",
    [
        ("a.b.c", ["a", "b", "c"]),
        ("a[0].b.c", ["a", 0, "b", "c"]),
        ("a[0][1][2].b.c", ["a", 0, 1, 2, "b", "c"]),
        ("a[0][1][2].b.c", ["a", 0, 1, 2, "b", "c"]),
        ("a[0][-1][-2].b.c", ["a", 0, -1, -2, "b", "c"]),
    ],
)
def test_to_path(case, expected):
    assert _.to_path(case) == expected


def test_unique_id_setup():
    _.utilities.ID_COUNTER = 0


@parametrize("case,expected", [((), "1"), (("foo",), "foo2")])
def test_unique_id(case, expected):
    assert _.unique_id(*case) == expected


pydash-8.0.3/tox.ini000066400000000000000000000004731464745015500143440ustar00rootroot00000000000000
[tox]
envlist = py38, py39, py310, py311, py312
isolated_build = true

[gh-actions]
python =
    3.8: py38
    3.9: py39
    3.10: py310
    3.11: py311
    3.12: py312

[testenv]
passenv = *
download = true
extras = dev
commands = {posargs:inv ci}
setenv =
    TOX_ENV_SITE_PACKAGES_DIR = {envsitepackagesdir}
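# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the archived files): a minimal usage
# example of the utility helpers exercised in tests/test_utilities.py above.
# Expected values mirror the parametrized cases; the only assumption is that
# pydash is importable.
# ---------------------------------------------------------------------------
import pydash as _

# attempt() returns the function's result, or the raised exception instance.
assert _.attempt(lambda a, b: a / b, 4, 2) == 2
assert isinstance(_.attempt(lambda a, b: a / b, 4, 0), ZeroDivisionError)

# default_to() substitutes the default only when the value is None.
assert _.default_to(None, 10) == 10
assert _.default_to(1, 10) == 1

# times() calls the iteratee with each index from 0 to n - 1.
assert _.times(5, lambda i: i * i) == [0, 1, 4, 9, 16]


# retry() re-invokes a failing callable, sleeping between attempts
# (attempts/delay/scale keywords as used in the retry tests above).
@_.retry(attempts=3, delay=0.5, scale=2.0)
def flaky():
    return "ok"


assert flaky() == "ok"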